[Intel-gfx] [PATCH 3/3] libIntelXvMC: kill UMS leftovers

Daniel Vetter daniel.vetter at ffwll.ch
Mon Mar 1 22:57:40 CET 2010


On i965-class hardware, kernel_exec_fencing was always 1 anyway. And on
i945, this patch also kills a memory leak (not sure how, but it does).

Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
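For reference, a minimal sketch of the pattern being removed; this is not
part of the patch, the helper names map_old/map_new are made up for
illustration, and it assumes the usual libdrm bufmgr API from
<intel_bufmgr.h>:

	/* UMS era: pick a mapping strategy at runtime */
	static void map_old(drm_intel_bo *bo, int kernel_exec_fencing)
	{
		if (kernel_exec_fencing)
			drm_intel_gem_bo_map_gtt(bo);	/* GTT map; kernel manages fences */
		else
			drm_intel_bo_map(bo, 1);	/* writable CPU map fallback */
	}

	/* KMS only: the kernel always manages fence registers, so the
	 * GTT path is unconditionally safe and the fallback is dead code. */
	static void map_new(drm_intel_bo *bo)
	{
		drm_intel_gem_bo_map_gtt(bo);
	}
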
 src/xvmc/i965_xvmc.c         |   12 +++---------
 src/xvmc/intel_batchbuffer.c |   20 ++++----------------
 src/xvmc/intel_xvmc.c        |    2 --
 src/xvmc/intel_xvmc.h        |    1 -
 src/xvmc/xvmc_vld.c          |   20 ++++----------------
 5 files changed, 11 insertions(+), 44 deletions(-)

diff --git a/src/xvmc/i965_xvmc.c b/src/xvmc/i965_xvmc.c
index ff67995..3de6054 100644
--- a/src/xvmc/i965_xvmc.c
+++ b/src/xvmc/i965_xvmc.c
@@ -731,11 +731,8 @@ static Status render_surface(Display * display,
 	}
 
 	if (media_state.indirect_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.
-						   indirect_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.indirect_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.
+					   indirect_data.bo);
 
 		drm_intel_bo_unreference(media_state.indirect_data.bo);
 	}
@@ -755,10 +752,7 @@ static Status render_surface(Display * display,
 	interface_descriptor(&media_state);
 	vfe_state(&media_state);
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
-	else
-		drm_intel_bo_map(media_state.indirect_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
 
 	block_ptr = media_state.indirect_data.bo->virtual;
 	for (i = first_macroblock; i < num_macroblocks + first_macroblock; i++) {
diff --git a/src/xvmc/intel_batchbuffer.c b/src/xvmc/intel_batchbuffer.c
index 1807d2c..fcd2866 100644
--- a/src/xvmc/intel_batchbuffer.c
+++ b/src/xvmc/intel_batchbuffer.c
@@ -73,10 +73,7 @@ Bool intelInitBatchBuffer(void)
 		return False;
 	}
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
@@ -87,10 +84,7 @@ Bool intelInitBatchBuffer(void)
 
 void intelFiniBatchBuffer(void)
 {
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 
 	drm_intel_bo_unreference(xvmc_driver->batch.buf);
 }
@@ -99,10 +93,7 @@ void intelFlushBatch(Bool refill)
 {
 	i965_end_batch();
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 
 	drm_intel_bo_exec(xvmc_driver->batch.buf,
 			  xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
@@ -118,10 +109,7 @@ void intelFlushBatch(Bool refill)
 		fprintf(stderr, "unable to alloc batch buffer\n");
 	}
 
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
diff --git a/src/xvmc/intel_xvmc.c b/src/xvmc/intel_xvmc.c
index c94ae4a..328d3c1 100644
--- a/src/xvmc/intel_xvmc.c
+++ b/src/xvmc/intel_xvmc.c
@@ -421,8 +421,6 @@ _X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
 
 	XVMC_INFO("decoder type is %s", intel_xvmc_decoder_string(comm->type));
 
-	xvmc_driver->kernel_exec_fencing = comm->kernel_exec_fencing;
-
 	/* assign local ctx info */
 	intel_ctx = intel_xvmc_new_context(display);
 	if (!intel_ctx) {
diff --git a/src/xvmc/intel_xvmc.h b/src/xvmc/intel_xvmc.h
index 43153cc..60a2fbb 100644
--- a/src/xvmc/intel_xvmc.h
+++ b/src/xvmc/intel_xvmc.h
@@ -131,7 +131,6 @@ typedef struct _intel_xvmc_driver {
 	int fd;			/* drm file handler */
 
 	dri_bufmgr *bufmgr;
-	unsigned int kernel_exec_fencing:1;
 
 	struct {
 		unsigned int init_offset;
diff --git a/src/xvmc/xvmc_vld.c b/src/xvmc/xvmc_vld.c
index dca0573..bea1ec7 100644
--- a/src/xvmc/xvmc_vld.c
+++ b/src/xvmc/xvmc_vld.c
@@ -1010,10 +1010,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 	q_scale_code = bit_buf >> 27;
 
 	if (media_state.slice_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.slice_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
 
 		drm_intel_bo_unreference(media_state.slice_data.bo);
 	}
@@ -1022,10 +1019,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 						       VLD_MAX_SLICE_SIZE, 64);
 	if (!media_state.slice_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
-	else
-		drm_intel_bo_map(media_state.slice_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
 
 	memcpy(media_state.slice_data.bo->virtual, slice, nbytes);
 
@@ -1110,10 +1104,7 @@ static Status render_surface(Display * display,
 		return ret;
 
 	if (media_state.mb_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.mb_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
 
 		drm_intel_bo_unreference(media_state.mb_data.bo);
 	}
@@ -1125,10 +1116,7 @@ static Status render_surface(Display * display,
 						    surface_size, 64);
 	if (!media_state.mb_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
-	else
-		drm_intel_bo_map(media_state.mb_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
 
 	block_ptr = media_state.mb_data.bo->virtual;
 	unsigned short *mb_block_ptr;
-- 
1.6.6.1



