[PATCH 16/16] drm: hold mutex in critical sections of render-node code

Ilija Hadzic ihadzic at research.bell-labs.com
Thu Mar 29 09:41:38 PDT 2012


Critical sections are the parts of the code where we claim or
release resources; we don't want two render-node create or remove
ioctls, issued in the context of different processes, to race and
each claim only part of the requested resources. Another critical
section is manipulation of the render-node list. We can use
dev->struct_mutex for both.
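
To make the intended locking pattern explicit before the diff, here is
a minimal sketch of a create/destroy pair; it is not code from this
patch, and the structure and function names (demo_device,
demo_render_node, demo_create_render_node, demo_destroy_render_node)
are simplified stand-ins for illustration only. The point is that
dev->struct_mutex brackets both the resource bookkeeping and the
render-node list manipulation, and that every early-exit path drops the
lock it took:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* pared-down stand-ins for the real drm structures */
struct demo_render_node {
	struct list_head list;
	int index;
};

struct demo_device {
	struct mutex struct_mutex;	/* protects render_node_list and ownership */
	struct list_head render_node_list;
};

/* create: claim resources and publish the node on the list under one lock */
static int demo_create_render_node(struct demo_device *dev, int index)
{
	struct demo_render_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->index = index;

	mutex_lock(&dev->struct_mutex);
	/* ... claim per-resource ownership here; on error, release it,
	 * unlock and bail out before publishing the node ... */
	list_add_tail(&node->list, &dev->render_node_list);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

/* destroy: every return path must drop the mutex it took */
static int demo_destroy_render_node(struct demo_device *dev, int index)
{
	struct demo_render_node *node, *tmp;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(node, tmp, &dev->render_node_list, list) {
		if (node->index == index) {
			list_del(&node->list);
			/* ... release claimed resources while still holding
			 * the lock, as the release helper takes no lock ... */
			mutex_unlock(&dev->struct_mutex);
			kfree(node);
			return 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);
	return -ENODEV;
}

As in the hunks below, the resource-release helper itself takes no lock
and relies on the caller already holding struct_mutex, which is why the
destroy paths release resources before unlocking.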

Signed-off-by: Ilija Hadzic <ihadzic at research.bell-labs.com>
---
 drivers/gpu/drm/drm_stub.c |   15 ++++++++++++++-
 1 files changed, 14 insertions(+), 1 deletions(-)

diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b025ad8..8abfb08 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -480,6 +480,7 @@ static void drm_release_render_node_resources(struct drm_device *dev,
 	int *render_node_owner;
 	int s, e, i, j;
 
+	/* no locking here; the caller must hold struct_mutex */
 	for (e = 0, j = 0; j < DRM_RN_NUM_EXP_TYPES; j++) {
 		s = e;
 		e += resource_count[j];
@@ -503,6 +504,7 @@ static int drm_claim_render_node_resources(struct drm_device *dev,
 	int s, e, i, j;
 	int ret = 0;
 
+	mutex_lock(&dev->struct_mutex);
 	for (e = 0, j = 0; j < DRM_RN_NUM_EXP_TYPES; j++) {
 		s = e;
 		e += resource_count[j];
@@ -523,10 +525,12 @@ static int drm_claim_render_node_resources(struct drm_device *dev,
 			*render_node_owner = minor;
 		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
 out_release:
 	drm_release_render_node_resources(dev, id_list, resource_count, minor);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -547,7 +551,9 @@ int drm_create_render_node(struct drm_device *dev, struct drm_minor **minor_p)
 	}
 	render_node->minor = minor;
 	*minor_p = minor;
+	mutex_lock(&dev->struct_mutex);
 	list_add_tail(&render_node->list, &dev->render_node_list);
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -555,13 +561,16 @@ int drm_destroy_render_node(struct drm_device *dev, int index)
 {
 	struct drm_render_node *node, *tmp;
 
+	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry_safe(node, tmp, &dev->render_node_list, list) {
 		if (node->minor->index == index) {
 			struct drm_mode_group *group;
 			int resource_count[DRM_RN_NUM_EXP_TYPES];
 
-			if (node->minor->open_count)
+			if (node->minor->open_count) {
+				mutex_unlock(&dev->struct_mutex);
 				return -EBUSY;
+			}
 			group = &node->minor->mode_group;
 			list_del(&node->list);
 			resource_count[0] = group->num_crtcs;
@@ -570,12 +579,14 @@ int drm_destroy_render_node(struct drm_device *dev, int index)
 			drm_release_render_node_resources(dev, group->id_list,
 							  resource_count,
 							  node->minor->index);
+			mutex_unlock(&dev->struct_mutex);
 			drm_put_minor(&node->minor);
 			drm_mode_group_fini(group);
 			kfree(node);
 			return 0;
 		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 	return -ENODEV;
 }
 
@@ -583,6 +594,7 @@ void drm_destroy_all_render_nodes(struct drm_device *dev)
 {
 	struct drm_render_node *node, *tmp;
 
+	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry_safe(node, tmp, &dev->render_node_list, list) {
 		struct drm_mode_group *group;
 		int resource_count[DRM_RN_NUM_EXP_TYPES];
@@ -599,6 +611,7 @@ void drm_destroy_all_render_nodes(struct drm_device *dev)
 		drm_mode_group_fini(group);
 		kfree(node);
 	}
+	mutex_unlock(&dev->struct_mutex);
 }
 
 /**
-- 
1.7.8.5


