[Intel-gfx] [PATCH] i915: add page flipping support

Eric Anholt eric at anholt.net
Wed Apr 22 04:48:39 CEST 2009


On Tue, 2009-04-14 at 15:22 -0700, Jesse Barnes wrote:
> Add a new page flipping ioctl to the i915 driver, using KMS
> functionality.
> 
> Internally, the new flip ioctl will use the mode_set_base function,
> which will simply update the front buffer pointer, taking care to pin
> the new buffer and unpin the old one.  This means userspace needs to
> create an FB id for the new front buffer prior to calling in.
> 
> The flip will take place at the next vblank period automatically (since
> the display base regs are double buffered), and any drawing to a
> buffer with a pending flip will be blocked at execbuf time until the
> next vblank interrupt fires and runs its completion handler.
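
For reference, here's roughly the userspace sequence I read this as implying --
an untested sketch using libdrm's drmModeAddFB() and drmCommandWrite(); the
helper name and the mode geometry/format parameters are made up for
illustration:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include "i915_drm.h"

/* Hypothetical helper: flip crtc_id to the buffer behind new_front_handle.
 * fd is the DRM master fd; width/height/pitch describe the new buffer. */
static int flip_to_buffer(int fd, uint32_t crtc_id, uint32_t new_front_handle,
			  uint32_t width, uint32_t height, uint32_t pitch)
{
	struct drm_i915_gem_page_flip flip;
	uint32_t fb_id;
	int ret;

	/* Userspace has to wrap the new front buffer in an FB id first */
	ret = drmModeAddFB(fd, width, height, 24, 32, pitch,
			   new_front_handle, &fb_id);
	if (ret)
		return ret;

	memset(&flip, 0, sizeof(flip));
	flip.fb_id = fb_id;
	flip.crtc_id = crtc_id;
	flip.flags = I915_PAGE_FLIP_WAIT;	/* optionally block until the flip completes */

	/* Queues the flip for the next vblank on that CRTC */
	return drmCommandWrite(fd, DRM_I915_GEM_PAGE_FLIP, &flip, sizeof(flip));
}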

What's ensuring that the rendering is complete before you queue the flip
to the new buffer?  The lock breaking in list walking at idle is also
making me uncomfortable.
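
To spell out that second point: the lock breaking I mean is the pattern in
i915_gem_idle() below. Simplified, with the hazard annotated (the comments
are mine, not part of the patch):

	spin_lock_irqsave(&dev_priv->vblank_lock, irqflags);
	for (pipe = 0; pipe < 2; pipe++) {
		list_for_each_entry_safe(obj_priv, tmp,
					 &dev_priv->mm.vblank_list[pipe],
					 vblank_head) {
			/* Once the lock is dropped, the vblank work handler
			 * can unlink and complete entries... */
			spin_unlock_irqrestore(&dev_priv->vblank_lock, irqflags);
			wait_for_completion(&obj_priv->vblank);
			spin_lock_irqsave(&dev_priv->vblank_lock, irqflags);
			/* ...so both obj_priv and the cached next entry (tmp)
			 * may no longer be on the list, or may be gone
			 * entirely, by the time the walk advances. */
		}
	}
	spin_unlock_irqrestore(&dev_priv->vblank_lock, irqflags);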

> diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> index c23b3a9..5273ce0 100644
> --- a/drivers/gpu/drm/i915/i915_dma.c
> +++ b/drivers/gpu/drm/i915/i915_dma.c
> @@ -871,6 +871,127 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
>  	return 0;
>  }
>  
> +bool
> +i915_gem_flip_pending(struct drm_gem_object *obj)
> +{
> +	struct drm_device *dev = obj->dev;
> +	drm_i915_private_t *dev_priv = dev->dev_private;
> +	struct drm_i915_gem_object *obj_priv = obj->driver_private;
> +	unsigned long flags;
> +	bool pending;
> +
> +	spin_lock_irqsave(&dev_priv->vblank_lock, flags);
> +	pending = !list_empty(&obj_priv->vblank_head);
> +	spin_unlock_irqrestore(&dev_priv->vblank_lock, flags);
> +
> +	return pending;
> +}
> +
> +static int i915_gem_page_flip(struct drm_device *dev, void *data,
> +			      struct drm_file *file_priv)
> +{
> +	struct drm_i915_gem_page_flip *flip_data = data;
> +	drm_i915_private_t *dev_priv = dev->dev_private;
> +	struct drm_mode_object *drm_obj;
> +	struct drm_crtc *crtc;
> +	struct intel_crtc *intel_crtc;
> +	struct intel_framebuffer *intel_fb;
> +	struct drm_framebuffer *old_fb;
> +	struct drm_i915_gem_object *obj_priv;
> +	unsigned long flags;
> +	unsigned int pipe;
> +	int ret = 0, seqno;
> +	struct drm_crtc_helper_funcs *crtc_funcs;
> +
> +	if (!(drm_core_check_feature(dev, DRIVER_MODESET)))
> +		return -ENODEV;
> +
> +	/*
> +	 * Reject unknown flags so future userspace knows what we (don't)
> +	 * support
> +	 */
> +	if (flip_data->flags & (~I915_PAGE_FLIP_WAIT)) {
> +		DRM_ERROR("bad page flip flags\n");
> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&dev->struct_mutex);
> +
> +	/* Find the CRTC & FB ids */
> +	drm_obj = drm_mode_object_find(dev, flip_data->crtc_id,
> +				   DRM_MODE_OBJECT_CRTC);
> +	if (!drm_obj) {
> +		DRM_DEBUG("unknown crtc %d\n", flip_data->crtc_id);
> +		ret = -EINVAL;
> +		goto out_unlock;
> +	}
> +	crtc = obj_to_crtc(drm_obj);
> +	if (!crtc->enabled) {
> +		DRM_ERROR("crtc %d not enabled\n", flip_data->crtc_id);
> +		ret = -EINVAL;
> +		goto out_unlock;
> +	}
> +	old_fb = crtc->fb;
> +	intel_crtc = to_intel_crtc(crtc);
> +	pipe = intel_crtc->pipe;
> +
> +	drm_obj = drm_mode_object_find(dev, flip_data->fb_id,
> +				       DRM_MODE_OBJECT_FB);
> +	if (!drm_obj) {
> +		DRM_DEBUG("unknown fb %d\n", flip_data->fb_id);
> +		ret = -EINVAL;
> +		goto out_unlock;
> +	}
> +	crtc->fb = obj_to_fb(drm_obj);
> +	intel_fb = to_intel_framebuffer(crtc->fb);
> +	obj_priv = intel_fb->obj->driver_private;
> +
> +	if (i915_gem_flip_pending(intel_fb->obj))
> +		wait_for_completion(&obj_priv->vblank);
> +
> +	/* Sanity check tiling */
> +	if (obj_priv->tiling_mode != I915_TILING_NONE &&
> +	    obj_priv->tiling_mode != I915_TILING_X) {
> +		DRM_ERROR("can only flip non-tiled or X tiled pages\n");
> +		ret = -EINVAL;
> +		goto out_unlock;
> +	}
> +
> +	/* Get vblank ref for completion handling */
> +	ret = drm_vblank_get(dev, pipe);
> +	if (ret) {
> +		DRM_ERROR("failed to take vblank ref\n");
> +		goto out_unlock;
> +	}
> +
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	crtc_funcs = crtc->helper_private;
> +	crtc_funcs->mode_set_base(crtc, 0, 0, old_fb);
> +	seqno = i915_add_request(dev, 0);
> +
> +	/*
> +	 * This is a bit racy; the flip above may have already happened
> +	 * by the time we get here.  If that happens, the new back buffer
> +	 * won't be available for rendering for one extra frame, since
> +	 * the vblank list won't have the object.
> +	 */
> +	spin_lock_irqsave(&dev_priv->vblank_lock, flags);
> +	list_add_tail(&obj_priv->vblank_head, &dev_priv->mm.vblank_list[pipe]);
> +	obj_priv->flip_seqno = seqno;
> +	spin_unlock_irqrestore(&dev_priv->vblank_lock, flags);
> +
> +	if (flip_data->flags & I915_PAGE_FLIP_WAIT)
> +		wait_for_completion(&obj_priv->vblank);
> +
> +	return 0;
> +
> +out_unlock:
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	return ret;
> +}
> +
>  /**
>   * i915_probe_agp - get AGP bootup configuration
>   * @pdev: PCI device
> @@ -1349,6 +1470,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
>  	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
>  	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
>  	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
> +	DRM_IOCTL_DEF(DRM_I915_GEM_PAGE_FLIP, i915_gem_page_flip, DRM_AUTH|DRM_MASTER),
>  };
>  
>  int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index efcd610..9f40a13 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -162,6 +162,10 @@ typedef struct drm_i915_private {
>  	u32 hotplug_supported_mask;
>  	struct work_struct hotplug_work;
>  
> +	/** Protects vblank list */
> +	spinlock_t vblank_lock;
> +	struct work_struct vblank_work;
> +
>  	int tex_lru_log_granularity;
>  	int allow_batchbuffer;
>  	struct mem_block *agp_heap;
> @@ -341,6 +345,11 @@ typedef struct drm_i915_private {
>  		 */
>  		struct delayed_work retire_work;
>  
> +		/**
> +		 * List of objects waiting on vblank events (one per pipe)
> +		 */
> +		struct list_head vblank_list[2];
> +
>  		uint32_t next_gem_seqno;
>  
>  		/**
> @@ -392,6 +401,13 @@ struct drm_i915_gem_object {
>  	/** This object's place on the active/flushing/inactive lists */
>  	struct list_head list;
>  
> +	/** Object's place on the vblank list (protected by vblank_lock)*/
> +	struct list_head vblank_head;
> +	/** sequence number for flip (when it passes the flip is done),
> +	 *  protected by vblank lock
> +	 */
> +	int flip_seqno;
> +
>  	/**
>  	 * This is set if the object is on the active or flushing lists
>  	 * (has pending rendering), and is not set if it's on inactive (ready
> @@ -462,6 +478,9 @@ struct drm_i915_gem_object {
>  	/** for phy allocated objects */
>  	struct drm_i915_gem_phys_object *phys_obj;
>  
> +	/** for page flips and other vblank related blocks */
> +	struct completion vblank;
> +
>  	/**
>  	 * Used for checking the object doesn't appear more than once
>  	 * in an execbuffer object list.
> @@ -526,6 +545,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
>  extern int i915_emit_box(struct drm_device *dev,
>  			 struct drm_clip_rect *boxes,
>  			 int i, int DR1, int DR4);
> +extern bool i915_gem_flip_pending(struct drm_gem_object *obj);
>  
>  /* i915_irq.c */
>  extern int i915_irq_emit(struct drm_device *dev, void *data,
> @@ -637,6 +657,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
>  void i915_gem_free_all_phys_object(struct drm_device *dev);
>  int i915_gem_object_get_pages(struct drm_gem_object *obj);
>  void i915_gem_object_put_pages(struct drm_gem_object *obj);
> +uint32_t i915_add_request(struct drm_device *dev, uint32_t flush_domains);
> +int i915_seqno_passed(uint32_t seq1, uint32_t seq2);
> +uint32_t i915_get_gem_seqno(struct drm_device *dev);
>  
>  /* i915_gem_tiling.c */
>  void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 6f7d0e2..136dfa0 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1374,7 +1374,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
>   *
>   * Returned sequence numbers are nonzero on success.
>   */
> -static uint32_t
> +uint32_t
>  i915_add_request(struct drm_device *dev, uint32_t flush_domains)
>  {
>  	drm_i915_private_t *dev_priv = dev->dev_private;
> @@ -1505,7 +1505,7 @@ out:
>  /**
>   * Returns true if seq1 is later than seq2.
>   */
> -static int
> +int
>  i915_seqno_passed(uint32_t seq1, uint32_t seq2)
>  {
>  	return (int32_t)(seq1 - seq2) >= 0;
> @@ -3116,6 +3116,26 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
>  	if (ret != 0)
>  		goto pre_mutex_err;
>  
> +	/* Look up object handles */
> +	for (i = 0; i < args->buffer_count; i++) {
> +		object_list[i] = drm_gem_object_lookup(dev, file_priv,
> +						       exec_list[i].handle);
> +		if (object_list[i] == NULL) {
> +			DRM_ERROR("Invalid object handle %d at index %d\n",
> +				   exec_list[i].handle, i);
> +			ret = -EBADF;
> +			mutex_lock(&dev->struct_mutex);
> +			goto err;
> +		}
> +
> +		if (i915_gem_flip_pending(object_list[i])) {
> +			struct drm_i915_gem_object *obj_priv;
> +
> +			obj_priv = object_list[i]->driver_private;
> +			wait_for_completion(&obj_priv->vblank);
> +		}
> +	}
> +
>  	mutex_lock(&dev->struct_mutex);
>  
>  	i915_verify_inactive(dev, __FILE__, __LINE__);
> @@ -3134,17 +3154,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
>  		goto pre_mutex_err;
>  	}
>  
> -	/* Look up object handles */
> +	/* Sanity check list for double entries */
>  	for (i = 0; i < args->buffer_count; i++) {
> -		object_list[i] = drm_gem_object_lookup(dev, file_priv,
> -						       exec_list[i].handle);
> -		if (object_list[i] == NULL) {
> -			DRM_ERROR("Invalid object handle %d at index %d\n",
> -				   exec_list[i].handle, i);
> -			ret = -EBADF;
> -			goto err;
> -		}
> -
>  		obj_priv = object_list[i]->driver_private;
>  		if (obj_priv->in_execbuffer) {
>  			DRM_ERROR("Object %p appears more than once in object list\n",
> @@ -3581,6 +3592,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
>  	obj_priv->obj = obj;
>  	obj_priv->fence_reg = I915_FENCE_REG_NONE;
>  	INIT_LIST_HEAD(&obj_priv->list);
> +	INIT_LIST_HEAD(&obj_priv->vblank_head);
> +
> +	init_completion(&obj_priv->vblank);
>  
>  	return 0;
>  }
> @@ -3641,8 +3655,10 @@ int
>  i915_gem_idle(struct drm_device *dev)
>  {
>  	drm_i915_private_t *dev_priv = dev->dev_private;
> +	struct drm_i915_gem_object *obj_priv, *tmp;
> +	unsigned long irqflags;
>  	uint32_t seqno, cur_seqno, last_seqno;
> -	int stuck, ret;
> +	int stuck, ret, pipe;
>  
>  	mutex_lock(&dev->struct_mutex);
>  
> @@ -3656,9 +3672,23 @@ i915_gem_idle(struct drm_device *dev)
>  	 */
>  	dev_priv->mm.suspended = 1;
>  
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	/* Wait for any outstanding flips */
> +	spin_lock_irqsave(&dev_priv->vblank_lock, irqflags);
> +	for (pipe = 0; pipe < 2; pipe++) {
> +		list_for_each_entry_safe(obj_priv, tmp,
> +					 &dev_priv->mm.vblank_list[pipe],
> +					 vblank_head) {
> +			spin_unlock_irqrestore(&dev_priv->vblank_lock, irqflags);
> +			wait_for_completion(&obj_priv->vblank);
> +			spin_lock_irqsave(&dev_priv->vblank_lock, irqflags);
> +		}
> +	}
> +	spin_unlock_irqrestore(&dev_priv->vblank_lock, irqflags);
> +
>  	/* Cancel the retire work handler, wait for it to finish if running */
> -	mutex_unlock(&dev->struct_mutex);
>  	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
>  	mutex_lock(&dev->struct_mutex);
>  
> @@ -4025,9 +4055,12 @@ i915_gem_load(struct drm_device *dev)
>  	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
>  	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
>  	INIT_LIST_HEAD(&dev_priv->mm.request_list);
> +	INIT_LIST_HEAD(&dev_priv->mm.vblank_list[0]);
> +	INIT_LIST_HEAD(&dev_priv->mm.vblank_list[1]);
>  	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
>  			  i915_gem_retire_work_handler);
>  	dev_priv->mm.next_gem_seqno = 1;
> +	spin_lock_init(&dev_priv->vblank_lock);
>  
>  	/* Old X drivers will take 0-2 for front, back, depth buffers */
>  	dev_priv->fence_reg_start = 3;
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index ee7ce7b..94aee6b 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -196,6 +196,35 @@ static void i915_hotplug_work_func(struct work_struct *work)
>  	drm_sysfs_hotplug_event(dev);
>  }
>  
> +static void i915_vblank_work_func(struct work_struct *work)
> +{
> +	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
> +						    vblank_work);
> +	struct drm_device *dev = dev_priv->dev;
> +	struct drm_i915_gem_object *obj_priv, *tmp;
> +	unsigned long irqflags;
> +	int pipe, cur_seqno;
> +
> +	mutex_lock(&dev->struct_mutex);
> +
> +	spin_lock_irqsave(&dev_priv->vblank_lock, irqflags);
> +	for (pipe = 0; pipe < 2; pipe++) {
> +		list_for_each_entry_safe(obj_priv, tmp,
> +					 &dev_priv->mm.vblank_list[pipe],
> +					 vblank_head) {
> +			cur_seqno = i915_get_gem_seqno(dev);
> +			if (i915_seqno_passed(cur_seqno,
> +					      obj_priv->flip_seqno)) {
> +				list_del_init(&obj_priv->vblank_head);
> +				drm_vblank_put(dev, pipe);
> +				complete(&obj_priv->vblank);
> +			}
> +		}
> +	}
> +	spin_unlock_irqrestore(&dev_priv->vblank_lock, irqflags);
> +	mutex_unlock(&dev->struct_mutex);
> +}
> +
>  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
>  {
>  	struct drm_device *dev = (struct drm_device *) arg;
> @@ -292,6 +321,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
>  			drm_handle_vblank(dev, 1);
>  		}
>  
> +		if (vblank)
> +			schedule_work(&dev_priv->vblank_work);
> +
>  		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
>  		    (iir & I915_ASLE_INTERRUPT))
>  			opregion_asle_intr(dev);
> @@ -563,6 +595,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
>  	I915_WRITE(IER, 0x0);
>  	(void) I915_READ(IER);
>  	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
> +	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_func);
>  }
>  
>  int i915_driver_irq_postinstall(struct drm_device *dev)
> diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
> index 67e3353..b022ba5 100644
> --- a/include/drm/i915_drm.h
> +++ b/include/drm/i915_drm.h
> @@ -184,6 +184,7 @@ typedef struct _drm_i915_sarea {
>  #define DRM_I915_GEM_GET_TILING	0x22
>  #define DRM_I915_GEM_GET_APERTURE 0x23
>  #define DRM_I915_GEM_MMAP_GTT	0x24
> +#define DRM_I915_GEM_PAGE_FLIP	0x25
>  
>  #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
>  #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
> @@ -219,6 +220,7 @@ typedef struct _drm_i915_sarea {
>  #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
>  #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
>  #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
> +#define DRM_IOCTL_I915_GEM_PAGE_FLIP	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PAGE_FLIP, struct drm_i915_gem_page_flip)
>  
>  /* Allow drivers to submit batchbuffers directly to hardware, relying
>   * on the security mechanisms provided by hardware.
> @@ -654,4 +656,20 @@ struct drm_i915_gem_get_aperture {
>  	__u64 aper_available_size;
>  };
>  
> +#define I915_PAGE_FLIP_WAIT (1<<0) /* block on page flip completion */
> +
> +struct drm_i915_gem_page_flip {
> +	/** Handle of new front buffer */
> +	uint32_t fb_id;
> +	/**
> +	 * crtc to flip
> +	 */
> +	uint32_t crtc_id;
> +
> +	/**
> +	 * page flip flags (wait on flip only for now)
> +	 */
> +	uint32_t flags;
> +};
> +
>  #endif				/* _I915_DRM_H_ */
-- 
Eric Anholt
eric at anholt.net                         eric.anholt at intel.com

