[PATCH 19/19] drm: hold mutex in critical sections of render-node code

Ilija Hadzic ihadzic at research.bell-labs.com
Thu Apr 12 11:19:44 PDT 2012


The critical sections are the parts of the code where we claim or
release resources: without locking, two render-node create or remove
ioctls issued from different processes could race and each end up
claiming only part of the requested resources. Manipulating the
render-node list is another critical section. We can use
dev->mode_config.mutex to protect both.
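
As an illustration of the intended pattern (claim everything under the
lock or roll back and claim nothing, and unlock on every exit path),
here is a minimal userspace sketch. It uses pthreads rather than the
kernel mutex API, and claim_resources()/owner[] are made-up names for
this sketch, not identifiers from the driver:

  #include <pthread.h>
  #include <stdio.h>

  #define NUM_RESOURCES 4

  static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
  static int owner[NUM_RESOURCES];   /* 0 means unclaimed */

  /* claim every requested resource atomically, or none at all */
  static int claim_resources(const int *wanted, int count, int minor)
  {
          int i, j, ret = 0;

          pthread_mutex_lock(&config_lock);
          for (i = 0; i < count; i++) {
                  if (owner[wanted[i]]) {
                          /* already taken: roll back what we claimed */
                          for (j = 0; j < i; j++)
                                  owner[wanted[j]] = 0;
                          ret = -1;
                          goto out_unlock;
                  }
                  owner[wanted[i]] = minor;
          }
  out_unlock:
          /* unlock on both the success and the error path */
          pthread_mutex_unlock(&config_lock);
          return ret;
  }

  int main(void)
  {
          int wanted[] = { 0, 2 };

          if (claim_resources(wanted, 2, 1) == 0)
                  printf("resources claimed by minor 1\n");
          return 0;
  }

The hunks below add the same discipline around
drm_claim_render_node_resources() and the render-node list walks, using
dev->mode_config.mutex as the lock.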

v2: - Use dev->mode_config.mutex instead of dev->struct_mutex
      because we also race against the drm_mode_getresources()
      and drm_mode_getplane_res() functions.

Signed-off-by: Ilija Hadzic <ihadzic at research.bell-labs.com>
---
 drivers/gpu/drm/drm_stub.c |   15 ++++++++++++++-
 1 files changed, 14 insertions(+), 1 deletions(-)

diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 13ff4c8..a5fd905 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -486,6 +486,7 @@ static void drm_release_render_node_resources(struct drm_device *dev,
 	int *render_node_owner;
 	int s, e, i, j;
 
+	/* no lock, assume we were called with mode_config mutex grabbed */
 	for (e = 0, j = 0; j < DRM_RN_NUM_EXP_TYPES; j++) {
 		s = e;
 		e += resource_count[j];
@@ -509,6 +510,7 @@ static int drm_claim_render_node_resources(struct drm_device *dev,
 	int s, e, i, j;
 	int ret = 0;
 
+	mutex_lock(&dev->mode_config.mutex);
 	for (e = 0, j = 0; j < DRM_RN_NUM_EXP_TYPES; j++) {
 		s = e;
 		e += resource_count[j];
@@ -529,10 +531,12 @@ static int drm_claim_render_node_resources(struct drm_device *dev,
 			*render_node_owner = minor;
 		}
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 
 out_release:
 	drm_release_render_node_resources(dev, id_list, resource_count, minor);
+	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
 
@@ -553,7 +557,9 @@ int drm_create_render_node(struct drm_device *dev, struct drm_minor **minor_p)
 	}
 	render_node->minor = minor;
 	*minor_p = minor;
+	mutex_lock(&dev->mode_config.mutex);
 	list_add_tail(&render_node->list, &dev->render_node_list);
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 }
 
@@ -561,13 +567,16 @@ int drm_destroy_render_node(struct drm_device *dev, int index)
 {
 	struct drm_render_node *node, *tmp;
 
+	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry_safe(node, tmp, &dev->render_node_list, list) {
 		if (node->minor->index == index) {
 			struct drm_mode_group *group;
 			int resource_count[DRM_RN_NUM_EXP_TYPES];
 
-			if (node->minor->open_count)
+			if (node->minor->open_count) {
+				mutex_unlock(&dev->mode_config.mutex);
 				return -EBUSY;
+			}
 			group = &node->minor->mode_group;
 			list_del(&node->list);
 			resource_count[0] = group->num_crtcs;
@@ -577,12 +586,14 @@ int drm_destroy_render_node(struct drm_device *dev, int index)
 			drm_release_render_node_resources(dev, group->id_list,
 							  resource_count,
 							  node->minor->index);
+			mutex_unlock(&dev->mode_config.mutex);
 			drm_put_minor(&node->minor);
 			drm_mode_group_fini(group);
 			kfree(node);
 			return 0;
 		}
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return -ENODEV;
 }
 
@@ -590,6 +601,7 @@ void drm_destroy_all_render_nodes(struct drm_device *dev)
 {
 	struct drm_render_node *node, *tmp;
 
+	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry_safe(node, tmp, &dev->render_node_list, list) {
 		struct drm_mode_group *group;
 		int resource_count[DRM_RN_NUM_EXP_TYPES];
@@ -607,6 +619,7 @@ void drm_destroy_all_render_nodes(struct drm_device *dev)
 		drm_mode_group_fini(group);
 		kfree(node);
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 /**
-- 
1.7.8.5


