[PATCH 36/36] fast-busy-ioctl
Chris Wilson
chris at chris-wilson.co.uk
Wed May 31 20:28:02 UTC 2017
---
drivers/gpu/drm/i915/i915_drv.c | 131 ++++++++++++++++++++++------------------
drivers/gpu/drm/i915/i915_drv.h | 5 +-
drivers/gpu/drm/i915/i915_gem.c | 47 +++++++++-----
include/drm/drm_ioctl.h | 15 ++++-
4 files changed, 122 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 11142eea0cfe..9008d03d319a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2510,63 +2510,6 @@ static int intel_runtime_resume(struct device *kdev)
return ret;
}
-const struct dev_pm_ops i915_pm_ops = {
- /*
- * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
- * PMSG_RESUME]
- */
- .suspend = i915_pm_suspend,
- .suspend_late = i915_pm_suspend_late,
- .resume_early = i915_pm_resume_early,
- .resume = i915_pm_resume,
-
- /*
- * S4 event handlers
- * @freeze, @freeze_late : called (1) before creating the
- * hibernation image [PMSG_FREEZE] and
- * (2) after rebooting, before restoring
- * the image [PMSG_QUIESCE]
- * @thaw, @thaw_early : called (1) after creating the hibernation
- * image, before writing it [PMSG_THAW]
- * and (2) after failing to create or
- * restore the image [PMSG_RECOVER]
- * @poweroff, @poweroff_late: called after writing the hibernation
- * image, before rebooting [PMSG_HIBERNATE]
- * @restore, @restore_early : called after rebooting and restoring the
- * hibernation image [PMSG_RESTORE]
- */
- .freeze = i915_pm_freeze,
- .freeze_late = i915_pm_freeze_late,
- .thaw_early = i915_pm_thaw_early,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_suspend,
- .poweroff_late = i915_pm_poweroff_late,
- .restore_early = i915_pm_restore_early,
- .restore = i915_pm_restore,
-
- /* S0ix (via runtime suspend) event handlers */
- .runtime_suspend = intel_runtime_suspend,
- .runtime_resume = intel_runtime_resume,
-};
-
-static const struct vm_operations_struct i915_gem_vm_ops = {
- .fault = i915_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct file_operations i915_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
- .llseek = noop_llseek,
-};
-
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -2597,7 +2540,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_DRIVER_IOCTL_DEF(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2630,6 +2573,39 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};
+static long i915_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ const struct drm_ioctl_desc *ioctl;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ if (nr - DRM_COMMAND_BASE >= ARRAY_SIZE(i915_ioctls))
+ return drm_ioctl(filp, cmd, arg);
+
+ ioctl = &i915_ioctls[nr - DRM_COMMAND_BASE];
+ if (!(ioctl->flags & DRM_DRIVER_IOCTL))
+ return drm_ioctl(filp, cmd, arg);
+
+ return ioctl->ioctl(filp, cmd, arg);
+}
+
+static const struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = i915_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .compat_ioctl = i915_compat_ioctl,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -2666,6 +2642,45 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
+ .suspend = i915_pm_suspend,
+ .suspend_late = i915_pm_suspend_late,
+ .resume_early = i915_pm_resume_early,
+ .resume = i915_pm_resume,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_poweroff_late,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
+
+ /* S0ix (via runtime suspend) event handlers */
+ .runtime_suspend = intel_runtime_suspend,
+ .runtime_resume = intel_runtime_resume,
+};
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cffba638a3c8..5059e9100d8d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3205,8 +3205,9 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+long i915_gem_busy_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2c4ca218fffb..3fa8ada81e0e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4140,21 +4140,28 @@ busy_check_writer(const struct dma_fence *fence)
return __busy_set_if_active(fence, __busy_write_id);
}
-int
-i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+long i915_gem_busy_ioctl(struct file *filp,
+			 unsigned int cmd,
+			 unsigned long iarg)
{
-	struct drm_i915_gem_busy *args = data;
+	struct drm_i915_gem_busy __user *arg = (typeof(arg))iarg;
struct drm_i915_gem_object *obj;
struct reservation_object_list *list;
unsigned int seq;
-	int err;
+	u32 handle;
+	u32 busy;
+
+	if (unlikely(!access_ok(VERIFY_WRITE, arg, sizeof(*arg))))
+		return -EFAULT;
+
+	user_access_begin();
+	unsafe_get_user(handle, &arg->handle, err_user);
+	user_access_end();
-	err = -ENOENT;
rcu_read_lock();
-	obj = i915_gem_object_lookup_rcu(file, args->handle);
-	if (!obj)
-		goto out;
+	obj = i915_gem_object_lookup_rcu(filp->private_data, handle);
+	if (unlikely(!obj))
+		goto err_obj;
/* A discrepancy here is that we do not report the status of
* non-i915 fences, i.e. even though we may report the object as idle,
@@ -4176,7 +4183,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
seq = raw_read_seqcount(&obj->resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
+	busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
/* Translate shared fences to READ set of engines */
list = rcu_dereference(obj->resv->fence);
@@ -4187,17 +4194,27 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct dma_fence *fence =
rcu_dereference(list->shared[i]);
-			args->busy |= busy_check_reader(fence);
+			busy |= busy_check_reader(fence);
}
}
-	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+	if (busy && read_seqcount_retry(&obj->resv->seq, seq))
goto retry;
-	err = 0;
-out:
rcu_read_unlock();
-	return err;
+
+	user_access_begin();
+	unsafe_put_user(busy, &arg->busy, err_user);
+	user_access_end();
+	return 0;
+
+err_user:
+	user_access_end();
+	return -EFAULT;
+
+err_obj:
+	rcu_read_unlock();
+	return -ENOENT;
}
int
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index ee03b3c44b3b..c420481d2078 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -133,6 +133,8 @@ enum drm_ioctl_flags {
* not set DRM_AUTH because they do not require authentication.
*/
DRM_RENDER_ALLOW = BIT(5),
+
+ DRM_DRIVER_IOCTL = BIT(6),
};
/**
@@ -148,7 +150,10 @@ enum drm_ioctl_flags {
struct drm_ioctl_desc {
unsigned int cmd;
enum drm_ioctl_flags flags;
- drm_ioctl_t *func;
+ union {
+ drm_ioctl_t *func;
+ long (*ioctl)(struct file *, unsigned int, unsigned long);
+ };
const char *name;
};
@@ -170,6 +175,14 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
+#define DRM_DRIVER_IOCTL_DEF(i, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##i) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##i, \
+ .ioctl = _func, \
+ .flags = _flags | DRM_DRIVER_IOCTL, \
+ .name = #i \
+ }
+
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
--
2.11.0
More information about the Intel-gfx-trybot
mailing list