[Mesa-dev] [PATCH] radeonsi: remove Constant Engine support

Dieter Nützel Dieter at nuetzel-hh.de
Mon Aug 21 02:21:32 UTC 2017


Tested-by: Dieter Nützel <Dieter at nuetzel-hh.de>

on RX580

Smaller footprint now ;-)

Dieter

On 19.08.2017 at 19:03, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak at amd.com>
> 
> We have come to the conclusion that it doesn't improve performance.
> ---
>  src/gallium/drivers/radeon/r600_gpu_load.c    |   3 -
>  src/gallium/drivers/radeon/r600_pipe_common.c |   2 -
>  src/gallium/drivers/radeon/r600_pipe_common.h |   5 +-
>  src/gallium/drivers/radeon/r600_query.c       |   3 -
>  src/gallium/drivers/radeon/r600_query.h       |   1 -
>  src/gallium/drivers/radeon/radeon_winsys.h    |  30 ---
>  src/gallium/drivers/radeonsi/si_compute.c     |   4 -
>  src/gallium/drivers/radeonsi/si_debug.c       |  12 +-
>  src/gallium/drivers/radeonsi/si_descriptors.c | 274 +++-----------------------
>  src/gallium/drivers/radeonsi/si_hw_context.c  |  52 +----
>  src/gallium/drivers/radeonsi/si_pipe.c        |  43 ----
>  src/gallium/drivers/radeonsi/si_pipe.h        |   9 -
>  src/gallium/drivers/radeonsi/si_state.h       |  18 --
>  src/gallium/drivers/radeonsi/si_state_draw.c  |  39 ----
>  src/gallium/winsys/amdgpu/drm/amdgpu_cs.c     |  98 ---------
>  src/gallium/winsys/amdgpu/drm/amdgpu_cs.h     |  12 +-
>  16 files changed, 35 insertions(+), 570 deletions(-)
> 
> diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
> index 4e9f7ea..ee9f592 100644
> --- a/src/gallium/drivers/radeon/r600_gpu_load.c
> +++ b/src/gallium/drivers/radeon/r600_gpu_load.c
> @@ -116,21 +116,20 @@ static void r600_update_mmio_counters(struct r600_common_screen *rscreen,
>  	if (rscreen->chip_class >= VI) {
>  		/* CP_STAT */
>  		rscreen->ws->read_registers(rscreen->ws, CP_STAT, 1, &value);
> 
>  		UPDATE_COUNTER(pfp, PFP_BUSY);
>  		UPDATE_COUNTER(meq, MEQ_BUSY);
>  		UPDATE_COUNTER(me, ME_BUSY);
>  		UPDATE_COUNTER(surf_sync, SURFACE_SYNC_BUSY);
>  		UPDATE_COUNTER(cp_dma, DMA_BUSY);
>  		UPDATE_COUNTER(scratch_ram, SCRATCH_RAM_BUSY);
> -		UPDATE_COUNTER(ce, CE_BUSY);
>  	}
> 
>  	value = gui_busy || sdma_busy;
>  	UPDATE_COUNTER(gpu, IDENTITY);
>  }
> 
>  #undef UPDATE_COUNTER
> 
>  static int
>  r600_gpu_load_thread(void *param)
> @@ -259,22 +258,20 @@ static unsigned busy_index_from_type(struct r600_common_screen *rscreen,
>  	case R600_QUERY_GPU_MEQ_BUSY:
>  		return BUSY_INDEX(rscreen, meq);
>  	case R600_QUERY_GPU_ME_BUSY:
>  		return BUSY_INDEX(rscreen, me);
>  	case R600_QUERY_GPU_SURF_SYNC_BUSY:
>  		return BUSY_INDEX(rscreen, surf_sync);
>  	case R600_QUERY_GPU_CP_DMA_BUSY:
>  		return BUSY_INDEX(rscreen, cp_dma);
>  	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
>  		return BUSY_INDEX(rscreen, scratch_ram);
> -	case R600_QUERY_GPU_CE_BUSY:
> -		return BUSY_INDEX(rscreen, ce);
>  	default:
>  		unreachable("invalid query type");
>  	}
>  }
> 
>  uint64_t r600_begin_counter(struct r600_common_screen *rscreen, 
> unsigned type)
>  {
>  	unsigned busy_index = busy_index_from_type(rscreen, type);
>  	return r600_read_mmio_counter(rscreen, busy_index);
>  }
> diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
> index 4d1b31d..f1f0cb3 100644
> --- a/src/gallium/drivers/radeon/r600_pipe_common.c
> +++ b/src/gallium/drivers/radeon/r600_pipe_common.c
> @@ -818,22 +818,20 @@ static const struct debug_named_value common_debug_options[] = {
>  	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on
> end-of-packet." },
>  	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all
> operations when possible." },
>  	{ "precompile", DBG_PRECOMPILE, "Compile one shader variant at
> shader creation." },
>  	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
>  	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
>  	{ "nodcc", DBG_NO_DCC, "Disable DCC." },
>  	{ "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
>  	{ "norbplus", DBG_NO_RB_PLUS, "Disable RB+." },
>  	{ "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction 
> Scheduler." },
>  	{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders
> compiled on demand" },
> -	{ "ce", DBG_CE, "Force enable the constant engine" },
> -	{ "noce", DBG_NO_CE, "Disable the constant engine"},
>  	{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader 
> optimizations" },
>  	{ "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main 
> framebuffer" },
> 
>  	DEBUG_NAMED_VALUE_END /* must be last */
>  };
> 
>  static const char* r600_get_vendor(struct pipe_screen* pscreen)
>  {
>  	return "X.Org";
>  }
> diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
> index 8be11ef..6072e24 100644
> --- a/src/gallium/drivers/radeon/r600_pipe_common.h
> +++ b/src/gallium/drivers/radeon/r600_pipe_common.h
> @@ -63,21 +63,21 @@
> 
>  /* special primitive types */
>  #define R600_PRIM_RECTANGLE_LIST	PIPE_PRIM_MAX
> 
>  /* Debug flags. */
>  /* logging and features */
>  #define DBG_TEX			(1 << 0)
>  #define DBG_NIR			(1 << 1)
>  #define DBG_COMPUTE		(1 << 2)
>  #define DBG_VM			(1 << 3)
> -#define DBG_CE			(1 << 4)
> +/* gap */
>  /* shader logging */
>  #define DBG_FS			(1 << 5)
>  #define DBG_VS			(1 << 6)
>  #define DBG_GS			(1 << 7)
>  #define DBG_PS			(1 << 8)
>  #define DBG_CS			(1 << 9)
>  #define DBG_TCS			(1 << 10)
>  #define DBG_TES			(1 << 11)
>  #define DBG_NO_IR		(1 << 12)
>  #define DBG_NO_TGSI		(1 << 13)
> @@ -99,21 +99,21 @@
>  #define DBG_FORCE_DMA		(1ull << 38)
>  #define DBG_PRECOMPILE		(1ull << 39)
>  #define DBG_INFO		(1ull << 40)
>  #define DBG_NO_WC		(1ull << 41)
>  #define DBG_CHECK_VM		(1ull << 42)
>  #define DBG_NO_DCC		(1ull << 43)
>  #define DBG_NO_DCC_CLEAR	(1ull << 44)
>  #define DBG_NO_RB_PLUS		(1ull << 45)
>  #define DBG_SI_SCHED		(1ull << 46)
>  #define DBG_MONOLITHIC_SHADERS	(1ull << 47)
> -#define DBG_NO_CE		(1ull << 48)
> +/* gap */
>  #define DBG_UNSAFE_MATH		(1ull << 49)
>  #define DBG_NO_DCC_FB		(1ull << 50)
>  #define DBG_TEST_VMFAULT_CP	(1ull << 51)
>  #define DBG_TEST_VMFAULT_SDMA	(1ull << 52)
>  #define DBG_TEST_VMFAULT_SHADER	(1ull << 53)
> 
>  #define R600_MAP_BUFFER_ALIGNMENT 64
>  #define R600_MAX_VIEWPORTS        16
> 
>  #define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
> @@ -366,21 +366,20 @@ union r600_mmio_counters {
>  		/* SRBM_STATUS2 */
>  		struct r600_mmio_counter sdma;
> 
>  		/* CP_STAT */
>  		struct r600_mmio_counter pfp;
>  		struct r600_mmio_counter meq;
>  		struct r600_mmio_counter me;
>  		struct r600_mmio_counter surf_sync;
>  		struct r600_mmio_counter cp_dma;
>  		struct r600_mmio_counter scratch_ram;
> -		struct r600_mmio_counter ce;
>  	} named;
>  	unsigned array[0];
>  };
> 
>  struct r600_memory_object {
>  	struct pipe_memory_object	b;
>  	struct pb_buffer		*buf;
>  	uint32_t			stride;
>  	uint32_t			offset;
>  };
> diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
> index bccfe7f..98bdd80 100644
> --- a/src/gallium/drivers/radeon/r600_query.c
> +++ b/src/gallium/drivers/radeon/r600_query.c
> @@ -212,21 +212,20 @@ static bool r600_query_sw_begin(struct r600_common_context *rctx,
>  	case R600_QUERY_GPU_DB_BUSY:
>  	case R600_QUERY_GPU_CP_BUSY:
>  	case R600_QUERY_GPU_CB_BUSY:
>  	case R600_QUERY_GPU_SDMA_BUSY:
>  	case R600_QUERY_GPU_PFP_BUSY:
>  	case R600_QUERY_GPU_MEQ_BUSY:
>  	case R600_QUERY_GPU_ME_BUSY:
>  	case R600_QUERY_GPU_SURF_SYNC_BUSY:
>  	case R600_QUERY_GPU_CP_DMA_BUSY:
>  	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
> -	case R600_QUERY_GPU_CE_BUSY:
>  		query->begin_result = r600_begin_counter(rctx->screen,
>  							 query->b.type);
>  		break;
>  	case R600_QUERY_NUM_COMPILATIONS:
>  		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
>  		break;
>  	case R600_QUERY_NUM_SHADERS_CREATED:
>  		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
>  		break;
>  	case R600_QUERY_NUM_SHADER_CACHE_HITS:
> @@ -369,21 +368,20 @@ static bool r600_query_sw_end(struct r600_common_context *rctx,
>  	case R600_QUERY_GPU_DB_BUSY:
>  	case R600_QUERY_GPU_CP_BUSY:
>  	case R600_QUERY_GPU_CB_BUSY:
>  	case R600_QUERY_GPU_SDMA_BUSY:
>  	case R600_QUERY_GPU_PFP_BUSY:
>  	case R600_QUERY_GPU_MEQ_BUSY:
>  	case R600_QUERY_GPU_ME_BUSY:
>  	case R600_QUERY_GPU_SURF_SYNC_BUSY:
>  	case R600_QUERY_GPU_CP_DMA_BUSY:
>  	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
> -	case R600_QUERY_GPU_CE_BUSY:
>  		query->end_result = r600_end_counter(rctx->screen,
>  						     query->b.type,
>  						     query->begin_result);
>  		query->begin_result = 0;
>  		break;
>  	case R600_QUERY_NUM_COMPILATIONS:
>  		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
>  		break;
>  	case R600_QUERY_NUM_SHADERS_CREATED:
>  		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
> @@ -2068,21 +2066,20 @@ static struct pipe_driver_query_info r600_driver_query_list[] = {
>  	X("GPU-db-busy",		GPU_DB_BUSY,		UINT64, AVERAGE),
>  	X("GPU-cp-busy",		GPU_CP_BUSY,		UINT64, AVERAGE),
>  	X("GPU-cb-busy",		GPU_CB_BUSY,		UINT64, AVERAGE),
>  	X("GPU-sdma-busy",		GPU_SDMA_BUSY,		UINT64, AVERAGE),
>  	X("GPU-pfp-busy",		GPU_PFP_BUSY,		UINT64, AVERAGE),
>  	X("GPU-meq-busy",		GPU_MEQ_BUSY,		UINT64, AVERAGE),
>  	X("GPU-me-busy",		GPU_ME_BUSY,		UINT64, AVERAGE),
>  	X("GPU-surf-sync-busy",		GPU_SURF_SYNC_BUSY,	UINT64, AVERAGE),
>  	X("GPU-cp-dma-busy",		GPU_CP_DMA_BUSY,	UINT64, AVERAGE),
>  	X("GPU-scratch-ram-busy",	GPU_SCRATCH_RAM_BUSY,	UINT64, AVERAGE),
> -	X("GPU-ce-busy",		GPU_CE_BUSY,		UINT64, AVERAGE),
>  };
> 
>  #undef X
>  #undef XG
>  #undef XFULL
> 
>  static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
>  {
>  	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
>  		return ARRAY_SIZE(r600_driver_query_list);
> diff --git a/src/gallium/drivers/radeon/r600_query.h b/src/gallium/drivers/radeon/r600_query.h
> index 815dc7f..7455c8e 100644
> --- a/src/gallium/drivers/radeon/r600_query.h
> +++ b/src/gallium/drivers/radeon/r600_query.h
> @@ -95,21 +95,20 @@ enum {
>  	R600_QUERY_GPU_DB_BUSY,
>  	R600_QUERY_GPU_CP_BUSY,
>  	R600_QUERY_GPU_CB_BUSY,
>  	R600_QUERY_GPU_SDMA_BUSY,
>  	R600_QUERY_GPU_PFP_BUSY,
>  	R600_QUERY_GPU_MEQ_BUSY,
>  	R600_QUERY_GPU_ME_BUSY,
>  	R600_QUERY_GPU_SURF_SYNC_BUSY,
>  	R600_QUERY_GPU_CP_DMA_BUSY,
>  	R600_QUERY_GPU_SCRATCH_RAM_BUSY,
> -	R600_QUERY_GPU_CE_BUSY,
>  	R600_QUERY_NUM_COMPILATIONS,
>  	R600_QUERY_NUM_SHADERS_CREATED,
>  	R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO,
>  	R600_QUERY_NUM_SHADER_CACHE_HITS,
>  	R600_QUERY_GPIN_ASIC_ID,
>  	R600_QUERY_GPIN_NUM_SIMD,
>  	R600_QUERY_GPIN_NUM_RB,
>  	R600_QUERY_GPIN_NUM_SPI,
>  	R600_QUERY_GPIN_NUM_SE,
> 
> diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
> index 351edcd..7431875 100644
> --- a/src/gallium/drivers/radeon/radeon_winsys.h
> +++ b/src/gallium/drivers/radeon/radeon_winsys.h
> @@ -450,50 +450,20 @@ struct radeon_winsys {
>       * \param flush     Flush callback function associated with the command stream.
>       * \param user      User pointer that will be passed to the flush callback.
>       */
>      struct radeon_winsys_cs *(*cs_create)(struct radeon_winsys_ctx *ctx,
>                                            enum ring_type ring_type,
>                                            void (*flush)(void *ctx, unsigned flags,
> 							struct pipe_fence_handle **fence),
>                                            void *flush_ctx);
> 
>      /**
> -     * Add a constant engine IB to a graphics CS. This makes the graphics CS
> -     * from "cs_create" a group of two IBs that share a buffer list and are
> -     * flushed together.
> -     *
> -     * The returned constant CS is only a stream for writing packets to the new
> -     * IB. Calling other winsys functions with it is not allowed, not even
> -     * "cs_destroy".
> -     *
> -     * In order to add buffers and check memory usage, use the graphics CS.
> -     * In order to flush it, use the graphics CS, which will flush both IBs.
> -     * Destroying the graphics CS will destroy both of them.
> -     *
> -     * \param cs  The graphics CS from "cs_create" that will hold the buffer
> -     *            list and will be used for flushing.
> -     */
> -    struct radeon_winsys_cs *(*cs_add_const_ib)(struct radeon_winsys_cs *cs);
> -
> -     /**
> -     * Add a constant engine preamble IB to a graphics CS. This adds an extra IB
> -     * in a similar manner to cs_add_const_ib. This should always be called after
> -     * cs_add_const_ib.
> -     *
> -     * The returned IB is a constant engine IB that only gets flushed if the
> -     * context changed.
> -     *
> -     * \param cs  The graphics CS from "cs_create" that will hold the buffer
> -     *            list and will be used for flushing.
> -     */
> -    struct radeon_winsys_cs *(*cs_add_const_preamble_ib)(struct radeon_winsys_cs *cs);
> -    /**
>       * Destroy a command stream.
>       *
>       * \param cs        A command stream to destroy.
>       */
>      void (*cs_destroy)(struct radeon_winsys_cs *cs);
> 
>      /**
>       * Add a buffer. Each buffer used by a CS must be added using
> this function.
>       *
>       * \param cs      Command stream
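For readers who never touched this interface: per the doc comment deleted
above, the CE stream hung off the graphics CS and was only ever a packet
sink. A minimal sketch of the old usage, where flush_fn/flush_data and the
emitted packet are purely illustrative:

    struct radeon_winsys_cs *gfx, *ce;

    gfx = ws->cs_create(ctx, RING_GFX, flush_fn, flush_data);
    ce = ws->cs_add_const_ib(gfx);  /* second IB, shares gfx's buffer list */
    if (ce) {
       /* only packet writes are valid on "ce", no other winsys calls */
       radeon_emit(ce, PKT3(PKT3_WRITE_CONST_RAM, num_dw, 0));
    }
    ws->cs_flush(gfx, 0, NULL);     /* flushes both IBs together */
    ws->cs_destroy(gfx);            /* destroys both; never cs_destroy(ce) */

Buffers were likewise added through the graphics CS only.
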
> diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
> index 5efdd39..c90fb06 100644
> --- a/src/gallium/drivers/radeonsi/si_compute.c
> +++ b/src/gallium/drivers/radeonsi/si_compute.c
> @@ -833,26 +833,22 @@ static void si_launch_grid(
>  			continue;
>  		}
>  		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
>  					  RADEON_USAGE_READWRITE,
>  					  RADEON_PRIO_COMPUTE_GLOBAL);
>  	}
> 
>  	if (program->ir_type == PIPE_SHADER_IR_TGSI)
>  		si_setup_tgsi_grid(sctx, info);
> 
> -	si_ce_pre_draw_synchronization(sctx);
> -
>  	si_emit_dispatch_packets(sctx, info);
> 
> -	si_ce_post_draw_synchronization(sctx);
> -
>  	sctx->compute_is_busy = true;
>  	sctx->b.num_compute_calls++;
>  	if (sctx->cs_shader_state.uses_scratch)
>  		sctx->b.num_spill_compute_calls++;
> 
>  	if (cs_regalloc_hang)
>  		sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
>  }
> 
> 
> diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
> index 5a6d391..740ee63 100644
> --- a/src/gallium/drivers/radeonsi/si_debug.c
> +++ b/src/gallium/drivers/radeonsi/si_debug.c
> @@ -213,60 +213,51 @@ static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
>  	si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
>  	si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
>  	si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
>  	si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
>  	fprintf(f, "\n");
>  }
> 
>  static void si_dump_last_ib(struct si_context *sctx, FILE *f)
>  {
>  	int last_trace_id = -1;
> -	int last_ce_trace_id = -1;
> 
>  	if (!sctx->last_gfx.ib)
>  		return;
> 
>  	if (sctx->last_trace_buf) {
>  		/* We are expecting that the ddebug pipe has already
>  		 * waited for the context, so this buffer should be idle.
>  		 * If the GPU is hung, there is no point in waiting for it.
>  		 */
>  		uint32_t *map = sctx->b.ws->buffer_map(sctx->last_trace_buf->buf,
>  						       NULL,
>  						       PIPE_TRANSFER_UNSYNCHRONIZED |
>  						       PIPE_TRANSFER_READ);
> -		if (map) {
> +		if (map)
>  			last_trace_id = map[0];
> -			last_ce_trace_id = map[1];
> -		}
>  	}
> 
>  	if (sctx->init_config)
>  		ac_parse_ib(f, sctx->init_config->pm4, sctx->init_config->ndw,
>  			    -1, "IB2: Init config", sctx->b.chip_class,
>  			    NULL, NULL);
> 
>  	if (sctx->init_config_gs_rings)
>  		ac_parse_ib(f, sctx->init_config_gs_rings->pm4,
>  			    sctx->init_config_gs_rings->ndw,
>  			    -1, "IB2: Init GS rings", sctx->b.chip_class,
>  			    NULL, NULL);
> 
>  	ac_parse_ib(f, sctx->last_gfx.ib, sctx->last_gfx.num_dw,
>  		    last_trace_id, "IB", sctx->b.chip_class,
>  		     NULL, NULL);
> -
> -	if (sctx->last_ce.ib) {
> -		ac_parse_ib(f, sctx->last_ce.ib, sctx->last_ce.num_dw,
> -			    last_ce_trace_id, "CE IB", sctx->b.chip_class,
> -			    NULL, NULL);
> -	}
>  }
> 
>  static const char *priority_to_string(enum radeon_bo_priority priority)
>  {
>  #define ITEM(x) [RADEON_PRIO_##x] = #x
>  	static const char *table[64] = {
>  		ITEM(FENCE),
>  	        ITEM(TRACE),
>  	        ITEM(SO_FILLED_SIZE),
>  	        ITEM(QUERY),
> @@ -847,21 +838,20 @@ static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
>  	}
> 
>  	if (flags & PIPE_DUMP_LAST_COMMAND_BUFFER) {
>  		si_dump_bo_list(sctx, &sctx->last_gfx, f);
>  		si_dump_last_ib(sctx, f);
> 
>  		fprintf(f, "Done.\n");
> 
>  		/* dump only once */
>  		radeon_clear_saved_cs(&sctx->last_gfx);
> -		radeon_clear_saved_cs(&sctx->last_ce);
>  		r600_resource_reference(&sctx->last_trace_buf, NULL);
>  	}
>  }
> 
>  static void si_dump_dma(struct si_context *sctx,
>  			struct radeon_saved_cs *saved, FILE *f)
>  {
>  	static const char ib_name[] = "sDMA IB";
>  	unsigned i;
> 
> diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
> index 1e0c422..8db7079 100644
> --- a/src/gallium/drivers/radeonsi/si_descriptors.c
> +++ b/src/gallium/drivers/radeonsi/si_descriptors.c
> @@ -89,206 +89,85 @@ static uint32_t null_texture_descriptor[8] = {
> 
>  static uint32_t null_image_descriptor[8] = {
>  	0,
>  	0,
>  	0,
>  	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
>  	/* the rest must contain zeros, which is also used by the buffer
>  	 * descriptor */
>  };
> 
> -static uint16_t si_ce_ram_size(struct si_context *sctx)
> -{
> -	return sctx->b.chip_class >= GFX9 ? 4096 : 32768;
> -}
> -
>  static void si_init_descriptor_list(uint32_t *desc_list,
>  				    unsigned element_dw_size,
>  				    unsigned num_elements,
>  				    const uint32_t *null_descriptor)
>  {
>  	int i;
> 
>  	/* Initialize the array to NULL descriptors if the element size is 8. */
>  	if (null_descriptor) {
>  		assert(element_dw_size % 8 == 0);
>  		for (i = 0; i < num_elements * element_dw_size / 8; i++)
>  			memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
>  	}
>  }
> 
> -static void si_init_descriptors(struct si_context *sctx,
> -				struct si_descriptors *desc,
> +static void si_init_descriptors(struct si_descriptors *desc,
>  				unsigned shader_userdata_index,
>  				unsigned element_dw_size,
> -				unsigned num_elements,
> -				unsigned first_ce_slot,
> -				unsigned num_ce_slots,
> -				unsigned *ce_offset)
> +				unsigned num_elements)
>  {
>  	assert(num_elements <= sizeof(desc->dirty_mask)*8);
> 
>  	desc->list = CALLOC(num_elements, element_dw_size * 4);
>  	desc->element_dw_size = element_dw_size;
>  	desc->num_elements = num_elements;
> -	desc->first_ce_slot = sctx->ce_ib ? first_ce_slot : 0;
> -	desc->num_ce_slots = sctx->ce_ib ? num_ce_slots : 0;
>  	desc->dirty_mask = u_bit_consecutive64(0, num_elements);
>  	desc->shader_userdata_offset = shader_userdata_index * 4;
> -
> -	if (desc->num_ce_slots) {
> -		desc->uses_ce = true;
> -		desc->ce_offset = *ce_offset;
> -
> -		*ce_offset += element_dw_size * desc->num_ce_slots * 4;
> -	}
>  }
> 
>  static void si_release_descriptors(struct si_descriptors *desc)
>  {
>  	r600_resource_reference(&desc->buffer, NULL);
>  	FREE(desc->list);
>  }
> 
> -static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
> -			 unsigned *out_offset, struct r600_resource **out_buf)
> -{
> -	uint64_t va;
> -	unsigned cache_line_size = sctx->screen->b.info.tcc_cache_line_size;
> -
> -	/* The base and size should be aligned to the L2 cache line size
> -	 * for optimal performance. (all dumps should rewrite whole lines)
> -	 */
> -	size = align(size, cache_line_size);
> -
> -	(void)si_ce_ram_size; /* silence an "unused" warning */
> -	assert(ce_offset + size <= si_ce_ram_size(sctx));
> -
> -	u_suballocator_alloc(sctx->ce_suballocator, size, cache_line_size,
> -			     out_offset, (struct pipe_resource**)out_buf);
> -	if (!out_buf)
> -			return false;
> -
> -	va = (*out_buf)->gpu_address + *out_offset;
> -
> -	radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
> -	radeon_emit(sctx->ce_ib, ce_offset);
> -	radeon_emit(sctx->ce_ib, size / 4);
> -	radeon_emit(sctx->ce_ib, va);
> -	radeon_emit(sctx->ce_ib, va >> 32);
> -
> -	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
> -	                       RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
> -
> -	sctx->ce_need_synchronization = true;
> -	return true;
> -}
> -
> -void si_ce_save_all_descriptors_at_ib_end(struct si_context* sctx)
> -{
> -	bool success = si_ce_upload(sctx, 0, sctx->total_ce_ram_allocated,
> -				    &sctx->ce_ram_saved_offset,
> -				    &sctx->ce_ram_saved_buffer);
> -	(void)success;
> -	assert(success);
> -}
> -
> -void si_ce_restore_all_descriptors_at_ib_start(struct si_context *sctx)
> -{
> -	if (!sctx->ce_ram_saved_buffer)
> -		return;
> -
> -	struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
> -	if (!ib)
> -		ib = sctx->ce_ib;
> -
> -	uint64_t va = sctx->ce_ram_saved_buffer->gpu_address +
> -		      sctx->ce_ram_saved_offset;
> -
> -	radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
> -	radeon_emit(ib, va);
> -	radeon_emit(ib, va >> 32);
> -	radeon_emit(ib, sctx->total_ce_ram_allocated / 4);
> -	radeon_emit(ib, 0);
> -
> -	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
> -				  sctx->ce_ram_saved_buffer,
> -				  RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
> -}
> -
> -void si_ce_enable_loads(struct radeon_winsys_cs *ib)
> -{
> -	radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
> -	radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
> -	                CONTEXT_CONTROL_LOAD_CE_RAM(1));
> -	radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
> -}
> -
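The four functions deleted above are the entire CE RAM protocol, worth
summarizing once: WRITE_CONST_RAM filled CE RAM from the IB, DUMP_CONST_RAM
copied a CE RAM range into a GTT buffer that the shaders then read,
LOAD_CONST_RAM restored the saved RAM image at the start of the next IB, and
CONTEXT_CONTROL with LOAD_CE_RAM(1) armed those loads. As a rough timeline,
my reading of the code rather than anything in the patch itself:

    CE IB:  CONTEXT_CONTROL(load CE RAM)        <- si_ce_enable_loads
            LOAD_CONST_RAM(saved image)         <- si_ce_restore_all_..._at_ib_start
            WRITE_CONST_RAM(dirty slots)        <- si_upload_descriptors below
            DUMP_CONST_RAM(range -> GTT)        <- si_ce_upload
            DUMP_CONST_RAM(entire allocation)   <- si_ce_save_all_..._at_ib_end
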
>  static bool si_upload_descriptors(struct si_context *sctx,
>  				  struct si_descriptors *desc,
>  				  struct r600_atom * atom)
>  {
>  	unsigned slot_size = desc->element_dw_size * 4;
>  	unsigned first_slot_offset = desc->first_active_slot * slot_size;
>  	unsigned upload_size = desc->num_active_slots * slot_size;
> 
>  	/* Skip the upload if no shader is using the descriptors. dirty_mask
>  	 * will stay dirty and the descriptors will be uploaded when there is
>  	 * a shader using them.
>  	 */
>  	if (!upload_size)
>  		return true;
> 
> -	if (desc->uses_ce) {
> -		const uint32_t *list = desc->list +
> -				       desc->first_ce_slot * desc->element_dw_size;
> -		uint64_t mask = (desc->dirty_mask >> desc->first_ce_slot) &
> -				u_bit_consecutive64(0, desc->num_ce_slots);
> -
> -
> -		while (mask) {
> -			int begin, count;
> -			u_bit_scan_consecutive_range64(&mask, &begin, &count);
> -
> -			begin *= desc->element_dw_size;
> -			count *= desc->element_dw_size;
> -
> -			radeon_emit(sctx->ce_ib,
> -			            PKT3(PKT3_WRITE_CONST_RAM, count, 0));
> -			radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
> -			radeon_emit_array(sctx->ce_ib, list + begin, count);
> -		}
> -
> -		if (!si_ce_upload(sctx,
> -				  desc->ce_offset +
> -				  (first_slot_offset - desc->first_ce_slot * slot_size),
> -				  upload_size, (unsigned*)&desc->buffer_offset,
> -				  &desc->buffer))
> -			return false;
> -	} else {
> -		uint32_t *ptr;
> -
> -		u_upload_alloc(sctx->b.b.const_uploader, 0, upload_size,
> -			       si_optimal_tcc_alignment(sctx, upload_size),
> -			       (unsigned*)&desc->buffer_offset,
> -			       (struct pipe_resource**)&desc->buffer,
> -			       (void**)&ptr);
> -		if (!desc->buffer)
> -			return false; /* skip the draw call */
> +	uint32_t *ptr;
> +	u_upload_alloc(sctx->b.b.const_uploader, 0, upload_size,
> +		       si_optimal_tcc_alignment(sctx, upload_size),
> +		       (unsigned*)&desc->buffer_offset,
> +		       (struct pipe_resource**)&desc->buffer,
> +		       (void**)&ptr);
> +	if (!desc->buffer)
> +		return false; /* skip the draw call */
> 
> -		util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
> -					upload_size);
> -		desc->gpu_list = ptr - first_slot_offset / 4;
> +	util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
> +				upload_size);
> +	desc->gpu_list = ptr - first_slot_offset / 4;
> 
> -		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
> -	                            RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
> -	}
> +	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
> +                            RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
> 
>  	/* The shader pointer should point to slot 0. */
>  	desc->buffer_offset -= first_slot_offset;
> 
>  	desc->dirty_mask = 0;
> 
>  	if (atom)
>  		si_mark_atom_dirty(sctx, atom);
> 
>  	return true;
> @@ -982,41 +861,36 @@ static void si_bind_sampler_states(struct pipe_context *ctx,
>  			continue;
> 
>  		memcpy(desc->list + desc_slot * 16 + 12, sstates[i]->val, 4*4);
>  		desc->dirty_mask |= 1ull << desc_slot;
>  		sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
>  	}
>  }
> 
>  /* BUFFER RESOURCES */
> 
> -static void si_init_buffer_resources(struct si_context *sctx,
> -				     struct si_buffer_resources *buffers,
> +static void si_init_buffer_resources(struct si_buffer_resources *buffers,
>  				     struct si_descriptors *descs,
>  				     unsigned num_buffers,
> -				     unsigned first_ce_slot,
> -				     unsigned num_ce_slots,
>  				     unsigned shader_userdata_index,
>  				     enum radeon_bo_usage shader_usage,
>  				     enum radeon_bo_usage shader_usage_constbuf,
>  				     enum radeon_bo_priority priority,
> -				     enum radeon_bo_priority priority_constbuf,
> -				     unsigned *ce_offset)
> +				     enum radeon_bo_priority priority_constbuf)
>  {
>  	buffers->shader_usage = shader_usage;
>  	buffers->shader_usage_constbuf = shader_usage_constbuf;
>  	buffers->priority = priority;
>  	buffers->priority_constbuf = priority_constbuf;
>  	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
> 
> -	si_init_descriptors(sctx, descs, shader_userdata_index, 4, num_buffers,
> -			    first_ce_slot, num_ce_slots, ce_offset);
> +	si_init_descriptors(descs, shader_userdata_index, 4, num_buffers);
>  }
> 
>  static void si_release_buffer_resources(struct si_buffer_resources *buffers,
>  					struct si_descriptors *descs)
>  {
>  	int i;
> 
>  	for (i = 0; i < descs->num_elements; i++) {
>  		pipe_resource_reference(&buffers->buffers[i], NULL);
>  	}
> @@ -2730,147 +2604,73 @@ void si_all_resident_buffers_begin_new_cs(struct si_context *sctx)
>  					   RADEON_USAGE_READWRITE,
>  					   false, false);
>  	}
> 
>  	sctx->b.num_resident_handles += num_resident_tex_handles +
>  					num_resident_img_handles;
>  }
> 
>  /* INIT/DEINIT/UPLOAD */
> 
> -/* GFX9 has only 4KB of CE, while previous chips had 32KB. In order
> - * to make CE RAM as useful as possible, this defines limits
> - * for the number slots that can be in CE RAM on GFX9. If a shader
> - * is using more, descriptors will be uploaded to memory directly and
> - * CE won't be used.
> - *
> - * These numbers are based on shader-db.
> - */
> -static unsigned gfx9_max_ce_samplers[SI_NUM_SHADERS] = {
> -	[PIPE_SHADER_VERTEX] = 0,
> -	[PIPE_SHADER_TESS_CTRL] = 0,
> -	[PIPE_SHADER_TESS_EVAL] = 1,
> -	[PIPE_SHADER_GEOMETRY] = 0,
> -	[PIPE_SHADER_FRAGMENT] = 24,
> -	[PIPE_SHADER_COMPUTE] = 16,
> -};
> -static unsigned gfx9_max_ce_images[SI_NUM_SHADERS] = {
> -	/* these must be even due to slot alignment */
> -	[PIPE_SHADER_VERTEX] = 0,
> -	[PIPE_SHADER_TESS_CTRL] = 0,
> -	[PIPE_SHADER_TESS_EVAL] = 0,
> -	[PIPE_SHADER_GEOMETRY] = 0,
> -	[PIPE_SHADER_FRAGMENT] = 2,
> -	[PIPE_SHADER_COMPUTE] = 8,
> -};
> -static unsigned gfx9_max_ce_const_buffers[SI_NUM_SHADERS] = {
> -	[PIPE_SHADER_VERTEX] = 9,
> -	[PIPE_SHADER_TESS_CTRL] = 3,
> -	[PIPE_SHADER_TESS_EVAL] = 5,
> -	[PIPE_SHADER_GEOMETRY] = 0,
> -	[PIPE_SHADER_FRAGMENT] = 8,
> -	[PIPE_SHADER_COMPUTE] = 6,
> -};
> -static unsigned gfx9_max_ce_shader_buffers[SI_NUM_SHADERS] = {
> -	[PIPE_SHADER_VERTEX] = 0,
> -	[PIPE_SHADER_TESS_CTRL] = 0,
> -	[PIPE_SHADER_TESS_EVAL] = 0,
> -	[PIPE_SHADER_GEOMETRY] = 0,
> -	[PIPE_SHADER_FRAGMENT] = 12,
> -	[PIPE_SHADER_COMPUTE] = 13,
> -};
> -
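Side note: the deleted GFX9 tables fit the 4 KB budget with almost nothing to
spare. Buffer slots take 4 dwords and sampler/image slots 16 dwords (see the
si_init_descriptors calls below), so by my arithmetic, not the patch's:

    PS:  (12+8)*16B + (2/2+24)*64B = 320 + 1600 = 1920 B
    CS:  (13+6)*16B + (8/2+16)*64B = 304 + 1280 = 1584 B
    VS:   9*16B                    =              144 B
    TES:  5*16B + 1*64B            =              144 B
    TCS:  3*16B                    =               48 B
    total                          =             3840 B

which leaves 256 bytes of the 4096-byte GFX9 CE RAM for the SI_NUM_RW_BUFFERS
slots, consistent with the assert(ce_offset <= si_ce_ram_size(sctx)) that is
also removed below.
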
>  void si_init_all_descriptors(struct si_context *sctx)
>  {
>  	int i;
> -	unsigned ce_offset = 0;
> 
>  	STATIC_ASSERT(GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS % 2 == 0);
>  	STATIC_ASSERT(GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS % 2 == 0);
> 
>  	for (i = 0; i < SI_NUM_SHADERS; i++) {
>  		bool gfx9_tcs = false;
>  		bool gfx9_gs = false;
>  		unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
>  		unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
> 
> -		unsigned first_sampler_ce_slot = 0;
> -		unsigned num_sampler_ce_slots = num_sampler_slots;
> -
> -		unsigned first_buffer_ce_slot = 0;
> -		unsigned num_buffer_ce_slots = num_buffer_slots;
> -
> -		/* Adjust CE slot ranges based on GFX9 CE RAM limits. */
> -		if (sctx->b.chip_class >= GFX9) {
> -			gfx9_tcs = i == PIPE_SHADER_TESS_CTRL;
> -			gfx9_gs = i == PIPE_SHADER_GEOMETRY;
> -
> -			first_sampler_ce_slot =
> -				si_get_image_slot(gfx9_max_ce_images[i] - 1) / 2;
> -			num_sampler_ce_slots = gfx9_max_ce_images[i] / 2 +
> -					       gfx9_max_ce_samplers[i];
> -
> -			first_buffer_ce_slot =
> -				si_get_shaderbuf_slot(gfx9_max_ce_shader_buffers[i] - 1);
> -			num_buffer_ce_slots = gfx9_max_ce_shader_buffers[i] +
> -					      gfx9_max_ce_const_buffers[i];
> -		}
> -
> -		si_init_buffer_resources(sctx, &sctx->const_and_shader_buffers[i],
> +		si_init_buffer_resources(&sctx->const_and_shader_buffers[i],
>  					 si_const_and_shader_buffer_descriptors(sctx, i),
>  					 num_buffer_slots,
> -					 first_buffer_ce_slot, num_buffer_ce_slots,
>  					 gfx9_tcs ? GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS :
>  					 gfx9_gs ? GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS :
>  						   SI_SGPR_CONST_AND_SHADER_BUFFERS,
>  					 RADEON_USAGE_READWRITE,
>  					 RADEON_USAGE_READ,
>  					 RADEON_PRIO_SHADER_RW_BUFFER,
> -					 RADEON_PRIO_CONST_BUFFER,
> -					 &ce_offset);
> +					 RADEON_PRIO_CONST_BUFFER);
> 
>  		struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, i);
> -		si_init_descriptors(sctx, desc,
> +		si_init_descriptors(desc,
>  				    gfx9_tcs ? GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES :
>  				    gfx9_gs ? GFX9_SGPR_GS_SAMPLERS_AND_IMAGES :
>  					      SI_SGPR_SAMPLERS_AND_IMAGES,
> -				    16, num_sampler_slots,
> -				    first_sampler_ce_slot, num_sampler_ce_slots,
> -				    &ce_offset);
> +				    16, num_sampler_slots);
> 
>  		int j;
>  		for (j = 0; j < SI_NUM_IMAGES; j++)
>  			memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
>  		for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
>  			memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
>  	}
> 
> -	si_init_buffer_resources(sctx, &sctx->rw_buffers,
> +	si_init_buffer_resources(&sctx->rw_buffers,
>  				 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
> -				 SI_NUM_RW_BUFFERS, 0, SI_NUM_RW_BUFFERS,
> -				 SI_SGPR_RW_BUFFERS,
> +				 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
>  				 /* The second set of usage/priority is used by
>  				  * const buffers in RW buffer slots. */
>  				 RADEON_USAGE_READWRITE, RADEON_USAGE_READ,
> -				 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER,
> -				 &ce_offset);
> +				 RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
>  	sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
> 
> -	si_init_descriptors(sctx, &sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
> -			    4, SI_NUM_VERTEX_BUFFERS, 0, 0, NULL);
> +	si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
> +			    4, SI_NUM_VERTEX_BUFFERS);
>  	FREE(sctx->vertex_buffers.list); /* not used */
>  	sctx->vertex_buffers.list = NULL;
> 
>  	sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
> -	sctx->total_ce_ram_allocated = ce_offset;
> -
> -	assert(ce_offset <= si_ce_ram_size(sctx));
> 
>  	/* Set pipe_context functions. */
>  	sctx->b.b.bind_sampler_states = si_bind_sampler_states;
>  	sctx->b.b.set_shader_images = si_set_shader_images;
>  	sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
>  	sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
>  	sctx->b.b.set_shader_buffers = si_set_shader_buffers;
>  	sctx->b.b.set_sampler_views = si_set_sampler_views;
>  	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
>  	sctx->b.b.create_texture_handle = si_create_texture_handle;
> @@ -3005,40 +2805,20 @@ void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
> 
>  	int first, count;
>  	u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
>  	assert(new_active_mask == 0);
> 
>  	/* Upload/dump descriptors if slots are being enabled. */
>  	if (first < desc->first_active_slot ||
>  	    first + count > desc->first_active_slot + desc->num_active_slots)
>  		sctx->descriptors_dirty |= 1u << desc_idx;
> 
> -	/* Enable or disable CE for this descriptor array. */
> -	bool used_ce = desc->uses_ce;
> -	desc->uses_ce = desc->first_ce_slot <= first &&
> -			desc->first_ce_slot + desc->num_ce_slots >= first + count;
> -
> -	if (desc->uses_ce != used_ce) {
> -		/* Upload or dump descriptors if we're disabling or enabling CE,
> -		 * respectively. */
> -		sctx->descriptors_dirty |= 1u << desc_idx;
> -
> -		/* If we're enabling CE, re-upload all descriptors to CE RAM.
> -		 * When CE was disabled, uploads to CE RAM stopped.
> -		 */
> -		if (desc->uses_ce) {
> -			desc->dirty_mask |=
> -				u_bit_consecutive64(desc->first_ce_slot,
> -						    desc->num_ce_slots);
> -		}
> -	}
> -
>  	desc->first_active_slot = first;
>  	desc->num_active_slots = count;
>  }
> 
>  void si_set_active_descriptors_for_shader(struct si_context *sctx,
>  					  struct si_shader_selector *sel)
>  {
>  	if (!sel)
>  		return;
> 
> diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
> index d9170c3..20063b8 100644
> --- a/src/gallium/drivers/radeonsi/si_hw_context.c
> +++ b/src/gallium/drivers/radeonsi/si_hw_context.c
> @@ -20,55 +20,24 @@
>   * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
>   * USE OR OTHER DEALINGS IN THE SOFTWARE.
>   *
>   * Authors:
>   *      Jerome Glisse
>   */
> 
>  #include "si_pipe.h"
>  #include "radeon/r600_cs.h"
> 
> -static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
> -{
> -	/* Ensure we have enough space to start a new range in a hole */
> -	assert(element_size >= 3);
> -
> -	/* 5 dwords for write to L2 + 3 bytes for the packet header of
> -	 * every disjoint range written to CE RAM.
> -	 */
> -	return 5 + (3 * count / 2) + count * element_size;
> -}
> -
> -static unsigned si_ce_needed_cs_space(void)
> -{
> -	unsigned space = 0;
> -
> -	space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS +
> -					     SI_NUM_CONST_BUFFERS, 4);
> -	/* two 8-byte images share one 16-byte slot */
> -	space += si_descriptor_list_cs_space(SI_NUM_IMAGES / 2 +
> -					     SI_NUM_SAMPLERS, 16);
> -	space *= SI_NUM_SHADERS;
> -
> -	space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);
> -
> -	/* Increment CE counter packet */
> -	space += 2;
> -
> -	return space;
> -}
> -
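Decoding the deleted estimate: the 5 dwords match the five radeon_emit()s of
one DUMP_CONST_RAM in si_ce_upload, and count/2 is the worst case of every
second slot being dirty, i.e. count/2 disjoint WRITE_CONST_RAM ranges at 3
header dwords each ("3 bytes" in the comment presumably meant dwords), so the
bound works out to:

    space = 5 + 3 * (count / 2) + count * element_size   /* in dwords */
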
>  /* initialize */
>  void si_need_cs_space(struct si_context *ctx)
>  {
>  	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
> -	struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
> 
>  	/* There is no need to flush the DMA IB here, because
>  	 * r600_need_dma_space always flushes the GFX IB if there is
>  	 * a conflict, which means any unflushed DMA commands automatically
>  	 * precede the GFX IB (= they had no dependency on the GFX IB when
>  	 * they were submitted).
>  	 */
> 
>  	/* There are two memory usage counters in the winsys for all buffers
>  	 * that have been added (cs_add_buffer) and two counters in the pipe
> @@ -80,22 +49,21 @@ void si_need_cs_space(struct si_context *ctx)
>  		ctx->b.vram = 0;
>  		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
>  		return;
>  	}
>  	ctx->b.gtt = 0;
>  	ctx->b.vram = 0;
> 
>  	/* If the CS is sufficiently large, don't count the space needed
>  	 * and just flush if there is not enough space left.
>  	 */
> -	if (!ctx->b.ws->cs_check_space(cs, 2048) ||
> -	    (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
> +	if (!ctx->b.ws->cs_check_space(cs, 2048))
>  		ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
>  }
> 
>  void si_context_gfx_flush(void *context, unsigned flags,
>  			  struct pipe_fence_handle **fence)
>  {
>  	struct si_context *ctx = context;
>  	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
>  	struct radeon_winsys *ws = ctx->b.ws;
> 
> @@ -116,45 +84,39 @@ void si_context_gfx_flush(void *context, unsigned flags,
>  	 * This code is only needed when the driver flushes the GFX IB
>  	 * internally, and it never asks for a fence handle.
>  	 */
>  	if (radeon_emitted(ctx->b.dma.cs, 0)) {
>  		assert(fence == NULL); /* internal flushes only */
>  		ctx->b.dma.flush(ctx, flags, NULL);
>  	}
> 
>  	ctx->gfx_flush_in_progress = true;
> 
> -	/* This CE dump should be done in parallel with the last draw. */
> -	if (ctx->ce_ib)
> -		si_ce_save_all_descriptors_at_ib_end(ctx);
> -
>  	r600_preflush_suspend_features(&ctx->b);
> 
>  	ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
>  			SI_CONTEXT_PS_PARTIAL_FLUSH;
> 
>  	/* DRM 3.1.0 doesn't flush TC for VI correctly. */
>  	if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
>  		ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
>  				SI_CONTEXT_INV_VMEM_L1;
> 
>  	si_emit_cache_flush(ctx);
> 
>  	if (ctx->trace_buf)
>  		si_trace_emit(ctx);
> 
>  	if (ctx->is_debug) {
>  		/* Save the IB for debug contexts. */
>  		radeon_clear_saved_cs(&ctx->last_gfx);
>  		radeon_save_cs(ws, cs, &ctx->last_gfx, true);
> -		radeon_clear_saved_cs(&ctx->last_ce);
> -		radeon_save_cs(ws, ctx->ce_ib, &ctx->last_ce, false);
>  		r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
>  		r600_resource_reference(&ctx->trace_buf, NULL);
>  	}
> 
>  	/* Flush the CS. */
>  	ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
>  	if (fence)
>  		ws->fence_reference(fence, ctx->b.last_gfx_fence);
>  	ctx->b.num_gfx_cs_flushes++;
> 
> @@ -168,27 +130,27 @@ void si_context_gfx_flush(void *context, unsigned flags,
>  		si_check_vm_faults(&ctx->b, &ctx->last_gfx, RING_GFX);
>  	}
> 
>  	si_begin_new_cs(ctx);
>  	ctx->gfx_flush_in_progress = false;
>  }
> 
>  void si_begin_new_cs(struct si_context *ctx)
>  {
>  	if (ctx->is_debug) {
> -		static const uint32_t zeros[2];
> +		static const uint32_t zeros[1];
> 
>  		/* Create a buffer used for writing trace IDs and initialize it to 0. */
>  		assert(!ctx->trace_buf);
>  		ctx->trace_buf = (struct r600_resource*)
>  				 pipe_buffer_create(ctx->b.b.screen, 0,
> -						    PIPE_USAGE_STAGING, 8);
> +						    PIPE_USAGE_STAGING, 4);
>  		if (ctx->trace_buf)
>  			pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
>  						    0, sizeof(zeros), zeros);
>  		ctx->trace_id = 0;
>  	}
> 
>  	if (ctx->trace_buf)
>  		si_trace_emit(ctx);
> 
>  	/* Flush read caches at the beginning of CS not flushed by the kernel. */
> @@ -201,28 +163,20 @@ void si_begin_new_cs(struct si_context *ctx)
>  	/* set all valid group as dirty so they get reemited on
>  	 * next draw command
>  	 */
>  	si_pm4_reset_emitted(ctx);
> 
>  	/* The CS initialization should be emitted before everything else. */
>  	si_pm4_emit(ctx, ctx->init_config);
>  	if (ctx->init_config_gs_rings)
>  		si_pm4_emit(ctx, ctx->init_config_gs_rings);
> 
> -	if (ctx->ce_preamble_ib)
> -		si_ce_enable_loads(ctx->ce_preamble_ib);
> -	else if (ctx->ce_ib)
> -		si_ce_enable_loads(ctx->ce_ib);
> -
> -	if (ctx->ce_ib)
> -		si_ce_restore_all_descriptors_at_ib_start(ctx);
> -
>  	if (ctx->queued.named.ls)
>  		ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
>  	if (ctx->queued.named.hs)
>  		ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
>  	if (ctx->queued.named.es)
>  		ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
>  	if (ctx->queued.named.gs)
>  		ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
>  	if (ctx->queued.named.vs)
>  		ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
> diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
> index 80a77a8..82f0c58 100644
> --- a/src/gallium/drivers/radeonsi/si_pipe.c
> +++ b/src/gallium/drivers/radeonsi/si_pipe.c
> @@ -47,24 +47,20 @@ static void si_destroy_context(struct pipe_context *context)
> 
>  	/* Unreference the framebuffer normally to disable related logic
>  	 * properly.
>  	 */
>  	struct pipe_framebuffer_state fb = {};
>  	if (context->set_framebuffer_state)
>  		context->set_framebuffer_state(context, &fb);
> 
>  	si_release_all_descriptors(sctx);
> 
> -	if (sctx->ce_suballocator)
> -		u_suballocator_destroy(sctx->ce_suballocator);
> -
> -	r600_resource_reference(&sctx->ce_ram_saved_buffer, NULL);
>  	pipe_resource_reference(&sctx->esgs_ring, NULL);
>  	pipe_resource_reference(&sctx->gsvs_ring, NULL);
>  	pipe_resource_reference(&sctx->tf_ring, NULL);
>  	pipe_resource_reference(&sctx->tess_offchip_ring, NULL);
>  	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
>  	r600_resource_reference(&sctx->border_color_buffer, NULL);
>  	free(sctx->border_color_table);
>  	r600_resource_reference(&sctx->scratch_buffer, NULL);
>  	r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
>  	r600_resource_reference(&sctx->wait_mem_scratch, NULL);
> @@ -193,59 +189,20 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
>  	if (sscreen->b.info.has_hw_decode) {
>  		sctx->b.b.create_video_codec = si_uvd_create_decoder;
>  		sctx->b.b.create_video_buffer = si_video_buffer_create;
>  	} else {
>  		sctx->b.b.create_video_codec = vl_create_decoder;
>  		sctx->b.b.create_video_buffer = vl_video_buffer_create;
>  	}
> 
>  	sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
>  				       si_context_gfx_flush, sctx);
> -
> -	bool enable_ce = sscreen->b.chip_class != SI && /* SI hangs */
> -			 /* These can't use CE due to a power gating bug in the kernel. */
> -			 sscreen->b.family != CHIP_CARRIZO &&
> -			 sscreen->b.family != CHIP_STONEY;
> -
> -	/* CE is currently disabled by default, because it makes s_load latency
> -	 * worse, because CE IB doesn't run in lockstep with DE.
> -	 * Remove this line after that performance issue has been resolved.
> -	 */
> -	enable_ce = false;
> -
> -	/* Apply CE overrides. */
> -	if (sscreen->b.debug_flags & DBG_NO_CE)
> -		enable_ce = false;
> -	else if (sscreen->b.debug_flags & DBG_CE)
> -		enable_ce = true;
> -
> -	if (ws->cs_add_const_ib && enable_ce) {
> -		sctx->ce_ib = ws->cs_add_const_ib(sctx->b.gfx.cs);
> -		if (!sctx->ce_ib)
> -			goto fail;
> -
> -		if (ws->cs_add_const_preamble_ib) {
> -			sctx->ce_preamble_ib =
> -			           ws->cs_add_const_preamble_ib(sctx->b.gfx.cs);
> -
> -			if (!sctx->ce_preamble_ib)
> -				goto fail;
> -		}
> -
> -		sctx->ce_suballocator =
> -			u_suballocator_create(&sctx->b.b, 1024 * 1024, 0,
> -					      PIPE_USAGE_DEFAULT,
> -					      R600_RESOURCE_FLAG_UNMAPPABLE, false);
> -		if (!sctx->ce_suballocator)
> -			goto fail;
> -	}
> -
>  	sctx->b.gfx.flush = si_context_gfx_flush;
> 
>  	/* Border colors. */
>  	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
>  					  sizeof(*sctx->border_color_table));
>  	if (!sctx->border_color_table)
>  		goto fail;
> 
>  	sctx->border_color_buffer = (struct r600_resource*)
>  		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
> diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
> index 8d82287..bb5e189 100644
> --- a/src/gallium/drivers/radeonsi/si_pipe.h
> +++ b/src/gallium/drivers/radeonsi/si_pipe.h
> @@ -274,29 +274,21 @@ struct si_context {
>  	void				*custom_dsa_flush;
>  	void				*custom_blend_resolve;
>  	void				*custom_blend_fmask_decompress;
>  	void				*custom_blend_eliminate_fastclear;
>  	void				*custom_blend_dcc_decompress;
>  	struct si_screen		*screen;
>  	LLVMTargetMachineRef		tm; /* only non-threaded compilation */
>  	struct si_shader_ctx_state	fixed_func_tcs_shader;
>  	struct r600_resource		*wait_mem_scratch;
>  	unsigned			wait_mem_number;
> -
> -	struct radeon_winsys_cs		*ce_ib;
> -	struct radeon_winsys_cs		*ce_preamble_ib;
> -	struct r600_resource		*ce_ram_saved_buffer;
> -	struct u_suballocator		*ce_suballocator;
> -	unsigned			ce_ram_saved_offset;
> -	uint16_t			total_ce_ram_allocated;
>  	uint16_t			prefetch_L2_mask;
> -	bool				ce_need_synchronization:1;
> 
>  	bool				gfx_flush_in_progress:1;
>  	bool				compute_is_busy:1;
> 
>  	/* Atoms (direct states). */
>  	union si_state_atoms		atoms;
>  	unsigned			dirty_atoms; /* mask */
>  	/* PM4 states (precomputed immutable states) */
>  	unsigned			dirty_states;
>  	union si_state			queued;
> @@ -413,21 +405,20 @@ struct si_context {
>  	struct si_shader	*last_ls;
>  	struct si_shader_selector *last_tcs;
>  	int			last_num_tcs_input_cp;
>  	int			last_tes_sh_base;
>  	bool			last_tess_uses_primid;
>  	unsigned		last_num_patches;
> 
>  	/* Debug state. */
>  	bool			is_debug;
>  	struct radeon_saved_cs	last_gfx;
> -	struct radeon_saved_cs	last_ce;
>  	struct r600_resource	*last_trace_buf;
>  	struct r600_resource	*trace_buf;
>  	unsigned		trace_id;
>  	uint64_t		dmesg_timestamp;
>  	unsigned		apitrace_call_number;
> 
>  	/* Other state */
>  	bool need_check_render_feedback;
>  	bool			decompression_enabled;
> 
> diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
> index bce4066..fd95aa0 100644
> --- a/src/gallium/drivers/radeonsi/si_state.h
> +++ b/src/gallium/drivers/radeonsi/si_state.h
> @@ -229,41 +229,28 @@ struct si_descriptors {
> 
>  	/* The buffer where the descriptors have been uploaded. */
>  	struct r600_resource *buffer;
>  	int buffer_offset; /* can be negative if not using lower slots */
> 
>  	/* The size of one descriptor. */
>  	ubyte element_dw_size;
>  	/* The maximum number of descriptors. */
>  	ubyte num_elements;
> 
> -	/* Offset in CE RAM */
> -	uint16_t ce_offset;
> -
> -	/* Slots allocated in CE RAM. If we get active slots outside of this
> -	 * range, direct uploads to memory will be used instead. This basically
> -	 * governs switching between onchip (CE) and offchip (upload) modes.
> -	 */
> -	ubyte first_ce_slot;
> -	ubyte num_ce_slots;
> -
>  	/* Slots that are used by currently-bound shaders.
>  	 * With CE: It determines which slots are dumped to L2.
>  	 *          It doesn't skip uploads to CE RAM.
>  	 * Without CE: It determines which slots are uploaded.
>  	 */
>  	ubyte first_active_slot;
>  	ubyte num_active_slots;
> 
> -	/* Whether CE is used to upload this descriptor array. */
> -	bool uses_ce;
> -
>  	/* The SGPR index where the 64-bit pointer to the descriptor array will
>  	 * be stored. */
>  	ubyte shader_userdata_offset;
>  };
> 
>  struct si_sampler_views {
>  	struct pipe_sampler_view	*views[SI_NUM_SAMPLERS];
>  	struct si_sampler_state		*sampler_states[SI_NUM_SAMPLERS];
> 
>  	/* The i-th bit is set if that element is enabled (non-NULL resource). */
> @@ -300,23 +287,20 @@ struct si_buffer_resources {
>  #define si_pm4_delete_state(sctx, member, value) \
>  	do { \
>  		if ((sctx)->queued.named.member == (value)) { \
>  			(sctx)->queued.named.member = NULL; \
>  		} \
>  		si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
>  				  si_pm4_block_idx(member)); \
>  	} while(0)
> 
>  /* si_descriptors.c */
> -void si_ce_save_all_descriptors_at_ib_end(struct si_context* sctx);
> -void si_ce_restore_all_descriptors_at_ib_start(struct si_context *sctx);
> -void si_ce_enable_loads(struct radeon_winsys_cs *ib);
>  void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
>  				    struct r600_texture *tex,
>  				    const struct legacy_surf_level *base_level_info,
>  				    unsigned base_level, unsigned first_level,
>  				    unsigned block_width, bool is_stencil,
>  				    uint32_t *state);
>  void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
>  				 uint slot, struct pipe_constant_buffer *cbuf);
>  void si_get_shader_buffers(struct si_context *sctx,
>  			   enum pipe_shader_type shader,
> @@ -394,22 +378,20 @@ void si_init_shader_functions(struct si_context *sctx);
>  bool si_init_shader_cache(struct si_screen *sscreen);
>  void si_destroy_shader_cache(struct si_screen *sscreen);
>  void si_init_shader_selector_async(void *job, int thread_index);
>  void si_get_active_slot_masks(const struct tgsi_shader_info *info,
>  			      uint32_t *const_and_shader_buffers,
>  			      uint64_t *samplers_and_images);
> 
>  /* si_state_draw.c */
>  void si_init_ia_multi_vgt_param_table(struct si_context *sctx);
>  void si_emit_cache_flush(struct si_context *sctx);
> -void si_ce_pre_draw_synchronization(struct si_context *sctx);
> -void si_ce_post_draw_synchronization(struct si_context *sctx);
>  void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo);
>  void si_trace_emit(struct si_context *sctx);
> 
> 
>  static inline unsigned
>  si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
>  {
>  	if (stencil)
>  		return rtex->surface.u.legacy.stencil_tiling_index[level];
>  	else
> diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
> index a26e38d..db8f77d 100644
> --- a/src/gallium/drivers/radeonsi/si_state_draw.c
> +++ b/src/gallium/drivers/radeonsi/si_state_draw.c
> @@ -1137,41 +1137,20 @@ static void si_get_draw_start_count(struct si_context *sctx,
>  			*count = end - begin;
>  		} else {
>  			*start = *count = 0;
>  		}
>  	} else {
>  		*start = info->start;
>  		*count = info->count;
>  	}
>  }
> 
> -void si_ce_pre_draw_synchronization(struct si_context *sctx)
> -{
> -	if (sctx->ce_need_synchronization) {
> -		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
> -		radeon_emit(sctx->ce_ib, 1); /* 1 = increment CE counter */
> -
> -		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
> -		radeon_emit(sctx->b.gfx.cs, 0); /* 0 = don't flush sL1 conditionally */
> -	}
> -}
> -
> -void si_ce_post_draw_synchronization(struct si_context *sctx)
> -{
> -	if (sctx->ce_need_synchronization) {
> -		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
> -		radeon_emit(sctx->b.gfx.cs, 0); /* unused */
> -
> -		sctx->ce_need_synchronization = false;
> -	}
> -}
> -
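These two were the whole CE/DE handshake. The constant engine runs ahead of
the drawing engine, so before a draw the DE had to wait until the CE counter
showed the descriptor dump had landed, and after the draw the DE bumped its
own counter so the CE would not overwrite data still in flight. My reading
of the deleted code as a timeline:

    CE IB:  WRITE/DUMP_CONST_RAM ... INCREMENT_CE_COUNTER
    DE IB:  WAIT_ON_CE_COUNTER -> draw -> INCREMENT_DE_COUNTER
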
>  static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
>  			       unsigned skip_atom_mask)
>  {
>  	/* Emit state atoms. */
>  	unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
>  	while (mask) {
>  		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
> 
>  		atom->emit(&sctx->b, atom);
>  	}
> @@ -1402,21 +1381,20 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
>  		/* <-- CUs are idle here. */
>  		if (!si_upload_graphics_shader_descriptors(sctx))
>  			return;
> 
>  		/* Set shader pointers after descriptors are uploaded. */
>  		if (si_is_atom_dirty(sctx, shader_pointers)) {
>  			shader_pointers->emit(&sctx->b, NULL);
>  			sctx->dirty_atoms = 0;
>  		}
> 
> -		si_ce_pre_draw_synchronization(sctx);
>  		si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
>  		/* <-- CUs are busy here. */
> 
>  		/* Start prefetches after the draw has been started. Both will run
>  		 * in parallel, but starting the draw first is more important.
>  		 */
>  		if (sctx->b.chip_class >= CIK && sctx->prefetch_L2_mask)
>  			cik_emit_prefetch_L2(sctx);
>  	} else {
>  		/* If we don't wait for idle, start prefetches first, then set
> @@ -1425,26 +1403,23 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
>  		if (sctx->b.flags)
>  			si_emit_cache_flush(sctx);
> 
>  		if (sctx->b.chip_class >= CIK && sctx->prefetch_L2_mask)
>  			cik_emit_prefetch_L2(sctx);
> 
>  		if (!si_upload_graphics_shader_descriptors(sctx))
>  			return;
> 
>  		si_emit_all_states(sctx, info, 0);
> -		si_ce_pre_draw_synchronization(sctx);
>  		si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
>  	}
> 
> -	si_ce_post_draw_synchronization(sctx);
> -
>  	if (sctx->trace_buf)
>  		si_trace_emit(sctx);
> 
>  	/* Workaround for a VGT hang when streamout is enabled.
>  	 * It must be done after drawing. */
>  	if ((sctx->b.family == CHIP_HAWAII ||
>  	     sctx->b.family == CHIP_TONGA ||
>  	     sctx->b.family == CHIP_FIJI) &&
>  	    r600_get_strmout_en(&sctx->b)) {
>  		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
> @@ -1475,25 +1450,11 @@ void si_trace_emit(struct si_context *sctx)
> 
>  	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
>  	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
>  		    S_370_WR_CONFIRM(1) |
>  		    S_370_ENGINE_SEL(V_370_ME));
>  	radeon_emit(cs, sctx->trace_buf->gpu_address);
>  	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
>  	radeon_emit(cs, sctx->trace_id);
>  	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
>  	radeon_emit(cs, AC_ENCODE_TRACE_POINT(sctx->trace_id));
> -
> -	if (sctx->ce_ib) {
> -		struct radeon_winsys_cs *ce = sctx->ce_ib;
> -
> -		radeon_emit(ce, PKT3(PKT3_WRITE_DATA, 3, 0));
> -		radeon_emit(ce, S_370_DST_SEL(V_370_MEM_ASYNC) |
> -			    S_370_WR_CONFIRM(1) |
> -			    S_370_ENGINE_SEL(V_370_CE));
> -		radeon_emit(ce, sctx->trace_buf->gpu_address + 4);
> -		radeon_emit(ce, (sctx->trace_buf->gpu_address + 4) >> 32);
> -		radeon_emit(ce, sctx->trace_id);
> -		radeon_emit(ce, PKT3(PKT3_NOP, 0, 0));
> -		radeon_emit(ce, AC_ENCODE_TRACE_POINT(sctx->trace_id));
> -	}
>  }
> diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
> index d266253..9cadfc4 100644
> --- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
> +++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
> @@ -559,26 +559,20 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
>      * INDIRECT_BUFFER packet.
>      */
>     if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
>        buffer_size = 4 *util_next_power_of_two(ib->max_ib_size);
>     else
>        buffer_size = 4 *util_next_power_of_two(4 * ib->max_ib_size);
> 
>     buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
> 
>     switch (ib->ib_type) {
> -   case IB_CONST_PREAMBLE:
> -      buffer_size = MAX2(buffer_size, 4 * 1024);
> -      break;
> -   case IB_CONST:
> -      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
> -      break;
>     case IB_MAIN:
>        buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
>        break;
>     default:
>        unreachable("unhandled IB type");
>     }
> 
>     pb = ws->base.buffer_create(&ws->base, buffer_size,
>                                 ws->info.gart_page_size,
>                                 RADEON_DOMAIN_GTT, 0);
> @@ -602,53 +596,38 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
> 
>  static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
>  {
>     switch (ib_type) {
>     case IB_MAIN:
>        /* Smaller submits means the GPU gets busy sooner and there is less
>         * waiting for buffers and fences. Proof:
>         *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
>         */
>        return 20 * 1024;
> -   case IB_CONST_PREAMBLE:
> -   case IB_CONST:
> -      /* There isn't really any reason to limit CE IB size beyond the natural
> -       * limit implied by the main IB, except perhaps GTT size. Just return
> -       * an extremely large value that we never get anywhere close to.
> -       */
> -      return 16 * 1024 * 1024;
>     default:
>        unreachable("bad ib_type");
>     }
>  }
> 
>  static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
>                                enum ib_type ib_type)
>  {
>     struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
>     /* Small IBs are better than big IBs, because the GPU goes idle quicker
>      * and there is less waiting for buffers and fences. Proof:
>      *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
>      */
>     struct amdgpu_ib *ib = NULL;
>     struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
>     unsigned ib_size = 0;
> 
>     switch (ib_type) {
> -   case IB_CONST_PREAMBLE:
> -      ib = &cs->const_preamble_ib;
> -      ib_size = 256 * 4;
> -      break;
> -   case IB_CONST:
> -      ib = &cs->const_ib;
> -      ib_size = 8 * 1024 * 4;
> -      break;
>     case IB_MAIN:
>        ib = &cs->main;
>        ib_size = 4 * 1024 * 4;
>        break;
>     default:
>        unreachable("unhandled IB type");
>     }
> 
>     if (!amdgpu_cs_has_chaining(cs)) {
>        ib_size = MAX2(ib_size,
> @@ -721,24 +700,20 @@ static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
>        cs->request.ip_type = AMDGPU_HW_IP_GFX;
>        break;
>     }
> 
>     memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
>     cs->last_added_bo = NULL;
> 
>     cs->request.number_of_ibs = 1;
>     cs->request.ibs = &cs->ib[IB_MAIN];
> 
> -   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
> -   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
> -                                     AMDGPU_IB_FLAG_PREAMBLE;
> -
>     return true;
>  }
> 
>  static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
>  {
>     unsigned i;
> 
>     for (i = 0; i < cs->num_real_buffers; i++) {
>        p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
>        amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
> @@ -792,22 +767,20 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
>     }
> 
>     util_queue_fence_init(&cs->flush_completed);
> 
>     cs->ctx = ctx;
>     cs->flush_cs = flush;
>     cs->flush_data = flush_ctx;
>     cs->ring_type = ring_type;
> 
>     cs->main.ib_type = IB_MAIN;
> -   cs->const_ib.ib_type = IB_CONST;
> -   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;
> 
>     if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
>        FREE(cs);
>        return NULL;
>     }
> 
>     if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
>        amdgpu_destroy_cs_context(&cs->csc1);
>        FREE(cs);
>        return NULL;
> @@ -821,66 +794,20 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
>        amdgpu_destroy_cs_context(&cs->csc2);
>        amdgpu_destroy_cs_context(&cs->csc1);
>        FREE(cs);
>        return NULL;
>     }
> 
>     p_atomic_inc(&ctx->ws->num_cs);
>     return &cs->main.base;
>  }
> 
> -static struct radeon_winsys_cs *
> -amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
> -{
> -   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
> -   struct amdgpu_winsys *ws = cs->ctx->ws;
> -
> -   /* only one const IB can be added */
> -   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
> -      return NULL;
> -
> -   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
> -      return NULL;
> -
> -   cs->csc->request.number_of_ibs = 2;
> -   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];
> -
> -   cs->cst->request.number_of_ibs = 2;
> -   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];
> -
> -   return &cs->const_ib.base;
> -}
> -
> -static struct radeon_winsys_cs *
> -amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
> -{
> -   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
> -   struct amdgpu_winsys *ws = cs->ctx->ws;
> -
> -   /* only one const preamble IB can be added and only when the const IB has
> -    * also been mapped */
> -   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
> -       cs->const_preamble_ib.ib_mapped)
> -      return NULL;
> -
> -   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
> -      return NULL;
> -
> -   cs->csc->request.number_of_ibs = 3;
> -   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];
> -
> -   cs->cst->request.number_of_ibs = 3;
> -   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];
> -
> -   return &cs->const_preamble_ib.base;
> -}
> -
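
[Editor's aside: worth spelling out why the old enum comment said "the
const IB must be first" -- the kernel takes a contiguous array of IB infos,
so these removed hooks submitted extra IBs simply by pointing request.ibs
at an earlier slot. Roughly, under the old layout:

   /* old layout: ib[0] = CONST_PREAMBLE, ib[1] = CONST, ib[2] = MAIN */
   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE]; /* ..up to MAIN */

With only IB_MAIN left, both the array trick and the ordering constraint
disappear.]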
>  static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
>  {
>     return true;
>  }
> 
>  static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
>  {
>     struct amdgpu_ib *ib = amdgpu_ib(rcs);
>     struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
>     unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
> @@ -1316,29 +1243,20 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
>        break;
>     case RING_GFX:
>        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
>        if (ws->info.gfx_ib_pad_with_type2) {
>           while (rcs->current.cdw & 7)
>              radeon_emit(rcs, 0x80000000); /* type2 nop packet */
>        } else {
>           while (rcs->current.cdw & 7)
>              radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
>        }
> -
> -      /* Also pad the const IB. */
> -      if (cs->const_ib.ib_mapped)
> -         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
> -            radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
> -
> -      if (cs->const_preamble_ib.ib_mapped)
> -         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
> -            radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
>        break;
>     case RING_UVD:
>        while (rcs->current.cdw & 15)
>           radeon_emit(rcs, 0x80000000); /* type2 nop packet */
>        break;
>     case RING_VCN_DEC:
>        while (rcs->current.cdw & 15)
>           radeon_emit(rcs, 0x81ff); /* nop packet */
>        break;
>     default:
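
[Editor's aside: the padding above is plain alignment arithmetic -- the GFX
ring is padded to a multiple of 8 dwords for the CP fetcher, UVD/VCN to 16.
Worked example, using nothing beyond the hunk:

   unsigned cdw = 13;   /* IB currently ends at 13 dwords */
   while (cdw & 7)
      cdw++;            /* each iteration emits one NOP packet */
   /* cdw == 16: three NOPs were emitted */
]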
> @@ -1351,26 +1269,20 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
> 
>     /* If the CS is not empty or overflowed.... */
>     if (likely(radeon_emitted(&cs->main.base, 0) &&
>         cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
>         !debug_get_option_noop())) {
>        struct amdgpu_cs_context *cur = cs->csc;
> 
>        /* Set IB sizes. */
>        amdgpu_ib_finalize(&cs->main);
> 
> -      if (cs->const_ib.ib_mapped)
> -         amdgpu_ib_finalize(&cs->const_ib);
> -
> -      if (cs->const_preamble_ib.ib_mapped)
> -         amdgpu_ib_finalize(&cs->const_preamble_ib);
> -
>        /* Create a fence. */
>        amdgpu_fence_reference(&cur->fence, NULL);
>        if (cs->next_fence) {
>           /* just move the reference */
>           cur->fence = cs->next_fence;
>           cs->next_fence = NULL;
>        } else {
>           cur->fence = amdgpu_fence_create(cs->ctx,
>                                            cur->request.ip_type,
>                                            cur->request.ip_instance,
> @@ -1402,24 +1314,20 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
> 
>        if (!(flags & RADEON_FLUSH_ASYNC)) {
>           amdgpu_cs_sync_flush(rcs);
>           error_code = cur->error_code;
>        }
>     } else {
>        amdgpu_cs_context_cleanup(cs->csc);
>     }
> 
>     amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
> -   if (cs->const_ib.ib_mapped)
> -      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
> -   if (cs->const_preamble_ib.ib_mapped)
> -      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);
> 
>     cs->main.base.used_gart = 0;
>     cs->main.base.used_vram = 0;
> 
>     if (cs->ring_type == RING_GFX)
>        ws->num_gfx_IBs++;
>     else if (cs->ring_type == RING_DMA)
>        ws->num_sdma_IBs++;
> 
>     return error_code;
> @@ -1427,24 +1335,20 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
> 
>  static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
>  {
>     struct amdgpu_cs *cs = amdgpu_cs(rcs);
> 
>     amdgpu_cs_sync_flush(rcs);
>     util_queue_fence_destroy(&cs->flush_completed);
>     p_atomic_dec(&cs->ctx->ws->num_cs);
>     pb_reference(&cs->main.big_ib_buffer, NULL);
>     FREE(cs->main.base.prev);
> -   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
> -   FREE(cs->const_ib.base.prev);
> -   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
> -   FREE(cs->const_preamble_ib.base.prev);
>     amdgpu_destroy_cs_context(&cs->csc1);
>     amdgpu_destroy_cs_context(&cs->csc2);
>     amdgpu_fence_reference(&cs->next_fence, NULL);
>     FREE(cs);
>  }
> 
>  static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
>                                      struct pb_buffer *_buf,
>                                      enum radeon_bo_usage usage)
>  {
> @@ -1453,22 +1357,20 @@ static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
> 
>     return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
>  }
> 
>  void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
>  {
>     ws->base.ctx_create = amdgpu_ctx_create;
>     ws->base.ctx_destroy = amdgpu_ctx_destroy;
>     ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
>     ws->base.cs_create = amdgpu_cs_create;
> -   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
> -   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
>     ws->base.cs_destroy = amdgpu_cs_destroy;
>     ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
>     ws->base.cs_validate = amdgpu_cs_validate;
>     ws->base.cs_check_space = amdgpu_cs_check_space;
>     ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
>     ws->base.cs_flush = amdgpu_cs_flush;
>     ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
>     ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
>     ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
>     ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
> diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
> index d83c1e0..8f5c336 100644
> --- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
> +++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
> @@ -52,24 +52,22 @@ struct amdgpu_cs_buffer {
>           uint64_t priority_usage;
>        } real;
>        struct {
>           uint32_t real_idx; /* index of underlying real BO */
>        } slab;
>     } u;
>     enum radeon_bo_usage usage;
>  };
> 
>  enum ib_type {
> -   IB_CONST_PREAMBLE = 0,
> -   IB_CONST = 1, /* the const IB must be first */
> -   IB_MAIN = 2,
> -   IB_NUM
> +   IB_MAIN,
> +   IB_NUM,
>  };
> 
>  struct amdgpu_ib {
>     struct radeon_winsys_cs base;
> 
>     /* A buffer out of which new IBs are allocated. */
>     struct pb_buffer        *big_ib_buffer;
>     uint8_t                 *ib_mapped;
>     unsigned                used_ib_space;
>     unsigned                max_ib_size;
> @@ -110,22 +108,20 @@ struct amdgpu_cs_context {
>     unsigned                    max_fence_dependencies;
> 
>     struct pipe_fence_handle    *fence;
> 
>     /* the error returned from cs_flush for non-async submissions */
>     int                         error_code;
>  };
> 
>  struct amdgpu_cs {
>     struct amdgpu_ib main; /* must be first because this is inherited */
> -   struct amdgpu_ib const_ib; /* optional constant engine IB */
> -   struct amdgpu_ib const_preamble_ib;
>     struct amdgpu_ctx *ctx;
>     enum ring_type ring_type;
> 
>     /* We flip between these two CS. While one is being consumed
>      * by the kernel in another thread, the other one is being filled
>      * by the pipe driver. */
>     struct amdgpu_cs_context csc1;
>     struct amdgpu_cs_context csc2;
>     /* The currently-used CS. */
>     struct amdgpu_cs_context *csc;
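
[Editor's aside: the csc1/csc2 pair above is straightforward double
buffering between the pipe driver and the submit thread. A sketch of the
flip that happens at flush time, using the names from this header (cst
being the context the kernel is consuming); this is an illustration, not
the exact code:

   static void flip_cs_contexts(struct amdgpu_cs *cs)
   {
      struct amdgpu_cs_context *tmp = cs->csc;
      cs->csc = cs->cst; /* refill the one the kernel has finished with */
      cs->cst = tmp;     /* hand the just-filled one to the submit thread */
   }
]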
> @@ -192,24 +188,20 @@ amdgpu_cs(struct radeon_winsys_cs *base)
> 
>  #define get_container(member_ptr, container_type, container_member) \
>     (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
> 
>  static inline struct amdgpu_cs *
>  amdgpu_cs_from_ib(struct amdgpu_ib *ib)
>  {
>     switch (ib->ib_type) {
>     case IB_MAIN:
>        return get_container(ib, struct amdgpu_cs, main);
> -   case IB_CONST:
> -      return get_container(ib, struct amdgpu_cs, const_ib);
> -   case IB_CONST_PREAMBLE:
> -      return get_container(ib, struct amdgpu_cs, const_preamble_ib);
>     default:
>        unreachable("bad ib_type");
>     }
>  }
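
[Editor's aside: get_container() above is the classic container_of idiom --
subtract the member's offset from the member pointer to recover the
enclosing struct. A tiny illustration with hypothetical storage (assumes
<assert.h>):

   struct amdgpu_cs cs_storage;
   struct amdgpu_ib *ib = &cs_storage.main;

   /* walks back from &cs_storage.main to &cs_storage */
   struct amdgpu_cs *cs = get_container(ib, struct amdgpu_cs, main);
   assert(cs == &cs_storage);
]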
> 
>  static inline bool
>  amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
>                                struct amdgpu_winsys_bo *bo)
>  {
>     int num_refs = bo->num_cs_references;

