[Intel-gfx] [RFC v2 6/8] drm: Handle fbdev emulation in core

Noralf Trønnes noralf at tronnes.org
Wed Jan 10 17:02:38 UTC 2018


On 09.01.2018 11.38, Daniel Vetter wrote:
> On Wed, Jan 03, 2018 at 11:21:08PM +0100, Noralf Trønnes wrote:
>> Prepare for generic fbdev emulation by letting DRM core work directly
>> with the fbdev compatibility layer. This is done by adding new fbdev
>> helper vtable callbacks for restore, hotplug_event, unregister and
>> release.
>>
>> Signed-off-by: Noralf Trønnes <noralf at tronnes.org>
> No clue whether my idea is any good, but inspired by the bootsplash
> discussion, and the prospect that we might get other in-kernel drm/kms
> clients I'm wondering whether we should make this a bit more generic and
> tie it to drm_file?
>
> The idea would be to have a list of internal drm clients by putting all
> the internal drm_files onto a list (same way we do with the userspace
> ones). Then we'd just walk that list and call ->hotplug, ->unregister and
> ->release at the right places.
>
> ->restore would be a bit different, I guess for that we'd only call the
> ->restore callback of the very first kernel-internal client.
>
> With that we could then add/remove kernel-internal drm clients like normal
> userspace clients, which should make integration of emergency consoles,
> boot splashes and whatever else real easy. And I think it would be a lot
> cleaner than leaking fb_helper knowledge into the drm core, which feels
> rather backwards.
>
> Otoh that feels a bit overengineered (but at least it shouldn't be a lot
> more code really). If the list is too much work we could start with 1
> drm_file * pointer for internal stuff (but would still need locking, so
> list_head is probably easier).
>
> Thoughts?

I prefer to have a proper in-kernel client API from the get-go instead
of fixing it up later. The reason I skipped spending time on it in this
RFC was that I didn't know whether I was on the right track, or whether
this was the right time to get the necessary attention to make this
dumb_buffer idea happen.

With a drm_file-centric approach, are you thinking something like this:

  struct drm_device {
+    struct list_head filelist_internal;
  };

+struct drm_file_funcs {
+    int (*restore)(struct drm_file *file);
+    void (*hotplug)(struct drm_file *file);
+    void (*unregister)(struct drm_file *file);
+};

  struct drm_file {
+    struct drm_device *dev;
+    const struct drm_file_funcs *funcs;
  };

  struct drm_file *drm_file_alloc(struct drm_minor *minor)
  {
+    file->dev = minor->dev;
  }

struct drm_file *drm_internal_open(struct drm_device *dev,
                                   const struct drm_file_funcs *funcs)
{
     struct drm_file *file;

     if (!try_module_get(dev->driver->fops->owner))
         return ERR_PTR(-ENODEV);

     drm_dev_ref(dev);

     file = drm_file_alloc(dev->primary);
     if (IS_ERR(file)) {
         drm_dev_unref(dev);
         module_put(dev->driver->fops->owner);
         return file;
     }

     file->funcs = funcs;

     mutex_lock(&dev->filelist_mutex);
     list_add(&file->lhead, &dev->filelist_internal);
     mutex_unlock(&dev->filelist_mutex);

     return file;
}

void drm_internal_release(struct drm_file *file)
{
     struct drm_device *dev = file->dev;

     mutex_lock(&dev->filelist_mutex);
     list_del(&file->lhead);
     mutex_unlock(&dev->filelist_mutex);

     drm_file_free(file);
     drm_dev_unref(dev);
     module_put(dev->driver->fops->owner);
}
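
Just to show what I'd expect a client to look like on top of this,
here's roughly how the generic fbdev emulation could hook in. The
drm_fbdev_* names are made up, only meant as illustration:

static int drm_fbdev_restore(struct drm_file *file);
static void drm_fbdev_hotplug(struct drm_file *file);
static void drm_fbdev_unregister(struct drm_file *file);

static const struct drm_file_funcs drm_fbdev_file_funcs = {
     .restore    = drm_fbdev_restore,
     .hotplug    = drm_fbdev_hotplug,
     .unregister = drm_fbdev_unregister,
};

static int drm_fbdev_setup(struct drm_device *dev)
{
     struct drm_file *file;

     file = drm_internal_open(dev, &drm_fbdev_file_funcs);
     if (IS_ERR(file))
          return PTR_ERR(file);

     /* allocate fb_info and a dumb buffer through this drm_file */

     return 0;
}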

  void drm_lastclose(struct drm_device *dev)
  {

+    mutex_lock(&dev->filelist_mutex);
+    list_for_each_entry(file, &dev->filelist_internal, lhead) {
+        if (file->funcs && file->funcs->restore &&
+            !file->funcs->restore(file))
+                break;
+    }
+    mutex_unlock(&dev->filelist_mutex);
  }

  void drm_kms_helper_hotplug_event(struct drm_device *dev)
  {

+    mutex_lock(&dev->filelist_mutex);
+    list_for_each_entry(file, &dev->filelist_internal, lhead) {
+        if (file->funcs && file->funcs->hotplug)
+            file->funcs->hotplug(file);
+    }
+    mutex_unlock(&dev->filelist_mutex);
  }

How is locking done when .unregister calls into drm_internal_release()?

  void drm_dev_unregister(struct drm_device *dev)
  {

+    list_for_each_entry(file, &dev->filelist_internal, lhead) {
+        if (file->funcs && file->funcs->unregister)
+            file->funcs->unregister(file);
+    }
  }
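
To spell out the locking question above: if drm_dev_unregister() took
filelist_mutex around that walk, an .unregister callback that tears the
client down right away would recurse into the same lock:

     drm_dev_unregister()
       mutex_lock(&dev->filelist_mutex)
       file->funcs->unregister(file)
         drm_internal_release(file)
           mutex_lock(&dev->filelist_mutex)   <-- deadlock

So either the walk has to run unlocked (and then probably use
list_for_each_entry_safe() since the callback removes the entry), or
.unregister has to defer drm_internal_release() to a later point.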

There is also the case where .unregister can't release the drm_file
because userspace has mmap'ed the buffer (fbdev). The client then holds a
ref on the drm_device, so cleanup is stalled, but that requires a driver
that can handle it (see the tinydrm unplug series, which is awaiting a
timeslot).

amdgpu iterates dev->filelist in amdgpu_gem_force_release() and
amdgpu_sched_process_priority_override(). I don't know if it needs to
handle the internal ones as well.

What's the rationale for having separate lists for internal and userspace files?

There is one thing missing, and that is notification of new drm_devices.
The client iterates the available drm_devices on module init, but it
needs a way to learn about drm_device registrations that happen later.
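
Maybe something like this, where the client registers itself with the
DRM core. These names are made up, just to illustrate the missing piece:

struct drm_internal_client {
     struct list_head list;
     /* called for devices already registered and for every
      * drm_device registered later */
     int (*new_device)(struct drm_internal_client *client,
                       struct drm_device *dev);
};

int drm_internal_client_register(struct drm_internal_client *client);
void drm_internal_client_unregister(struct drm_internal_client *client);

drm_dev_register() would then walk the registered clients and call
->new_device, where the client can do drm_internal_open() and set
itself up.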

Noralf.

> -Daniel
>
>> ---
>>   drivers/gpu/drm/drm_file.c         | 12 +++++++++++-
>>   drivers/gpu/drm/drm_mode_config.c  | 10 ++++++++++
>>   drivers/gpu/drm/drm_probe_helper.c |  4 ++++
>>   include/drm/drm_fb_helper.h        | 33 +++++++++++++++++++++++++++++++++
>>   4 files changed, 58 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
>> index 400d44437e93..7ec09fb83135 100644
>> --- a/drivers/gpu/drm/drm_file.c
>> +++ b/drivers/gpu/drm/drm_file.c
>> @@ -35,6 +35,7 @@
>>   #include <linux/slab.h>
>>   #include <linux/module.h>
>>   
>> +#include <drm/drm_fb_helper.h>
>>   #include <drm/drm_file.h>
>>   #include <drm/drmP.h>
>>   
>> @@ -441,10 +442,19 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
>>   
>>   void drm_lastclose(struct drm_device * dev)
>>   {
>> +	struct drm_fb_helper *fb_helper = dev->fb_helper;
>> +	int ret;
>> +
>>   	DRM_DEBUG("\n");
>>   
>> -	if (dev->driver->lastclose)
>> +	if (dev->driver->lastclose) {
>>   		dev->driver->lastclose(dev);
>> +	} else if (fb_helper && fb_helper->funcs && fb_helper->funcs->restore) {
>> +		ret = fb_helper->funcs->restore(fb_helper);
>> +		if (ret)
>> +			DRM_ERROR("Failed to restore fbdev: %d\n", ret);
>> +	}
>> +
>>   	DRM_DEBUG("driver lastclose completed\n");
>>   
>>   	if (drm_core_check_feature(dev, DRIVER_LEGACY))
>> diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
>> index bc5c46306b3d..260eb1730244 100644
>> --- a/drivers/gpu/drm/drm_mode_config.c
>> +++ b/drivers/gpu/drm/drm_mode_config.c
>> @@ -21,6 +21,7 @@
>>    */
>>   
>>   #include <drm/drm_encoder.h>
>> +#include <drm/drm_fb_helper.h>
>>   #include <drm/drm_mode_config.h>
>>   #include <drm/drmP.h>
>>   
>> @@ -61,6 +62,11 @@ int drm_modeset_register_all(struct drm_device *dev)
>>   
>>   void drm_modeset_unregister_all(struct drm_device *dev)
>>   {
>> +	struct drm_fb_helper *fb_helper = dev->fb_helper;
>> +
>> +	if (fb_helper && fb_helper->funcs && fb_helper->funcs->unregister)
>> +		fb_helper->funcs->unregister(fb_helper);
>> +
>>   	drm_connector_unregister_all(dev);
>>   	drm_encoder_unregister_all(dev);
>>   	drm_crtc_unregister_all(dev);
>> @@ -408,6 +414,7 @@ EXPORT_SYMBOL(drm_mode_config_init);
>>    */
>>   void drm_mode_config_cleanup(struct drm_device *dev)
>>   {
>> +	struct drm_fb_helper *fb_helper = dev->fb_helper;
>>   	struct drm_connector *connector;
>>   	struct drm_connector_list_iter conn_iter;
>>   	struct drm_crtc *crtc, *ct;
>> @@ -417,6 +424,9 @@ void drm_mode_config_cleanup(struct drm_device *dev)
>>   	struct drm_property_blob *blob, *bt;
>>   	struct drm_plane *plane, *plt;
>>   
>> +	if (fb_helper && fb_helper->funcs && fb_helper->funcs->release)
>> +		fb_helper->funcs->release(fb_helper);
>> +
>>   	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
>>   				 head) {
>>   		encoder->funcs->destroy(encoder);
>> diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
>> index 555fbe54d6e2..9d8b0ba54173 100644
>> --- a/drivers/gpu/drm/drm_probe_helper.c
>> +++ b/drivers/gpu/drm/drm_probe_helper.c
>> @@ -559,10 +559,14 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
>>    */
>>   void drm_kms_helper_hotplug_event(struct drm_device *dev)
>>   {
>> +	struct drm_fb_helper *fb_helper = dev->fb_helper;
>> +
>>   	/* send a uevent + call fbdev */
>>   	drm_sysfs_hotplug_event(dev);
>>   	if (dev->mode_config.funcs->output_poll_changed)
>>   		dev->mode_config.funcs->output_poll_changed(dev);
>> +	else if (fb_helper && fb_helper->funcs && fb_helper->funcs->hotplug_event)
>> +		fb_helper->funcs->hotplug_event(fb_helper);
>>   }
>>   EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
>>   
>> diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
>> index 16d8773b60e3..385f967c3552 100644
>> --- a/include/drm/drm_fb_helper.h
>> +++ b/include/drm/drm_fb_helper.h
>> @@ -125,6 +125,39 @@ struct drm_fb_helper_funcs {
>>   			       struct drm_display_mode **modes,
>>   			       struct drm_fb_offset *offsets,
>>   			       bool *enabled, int width, int height);
>> +
>> +	/**
>> +	 * @restore:
>> +	 *
>> +	 * Optional callback for restoring fbdev emulation.
>> +	 * Called by drm_lastclose() if &drm_driver->lastclose is not set.
>> +	 */
>> +	int (*restore)(struct drm_fb_helper *fb_helper);
>> +
>> +	/**
>> +	 * @hotplug_event:
>> +	 *
>> +	 * Optional callback for hotplug events.
>> +	 * Called by drm_kms_helper_hotplug_event() if
>> +	 * &drm_mode_config_funcs->output_poll_changed is not set.
>> +	 */
>> +	int (*hotplug_event)(struct drm_fb_helper *fb_helper);
>> +
>> +	/**
>> +	 * @unregister:
>> +	 *
>> +	 * Optional callback for unregistering fbdev emulation.
>> +	 * Called by drm_dev_unregister().
>> +	 */
>> +	void (*unregister)(struct drm_fb_helper *fb_helper);
>> +
>> +	/**
>> +	 * @release:
>> +	 *
>> +	 * Optional callback for releasing fbdev emulation resources.
>> +	 * Called by drm_mode_config_cleanup().
>> +	 */
>> +	void (*release)(struct drm_fb_helper *fb_helper);
>>   };
>>   
>>   struct drm_fb_helper_connector {
>> -- 
>> 2.14.2
>>


