[igt-dev] [PATCH i-g-t v2 1/5] lib/media_spin: Move helper functions to gpu_fill library
Daniele Ceraolo Spurio
daniele.ceraolospurio at intel.com
Fri Apr 27 17:47:27 UTC 2018
On 27/04/18 03:03, Katarzyna Dec wrote:
> Let's remove duplications introduced by moving media_spin helper
> functions to gpu_fill. These were mainly the same functions
> as for Gen8 media/gpgpu fill. gen8_render_flush from media_spin
> was replaced by gen7_render_flush. The only functions that were
> left intact are gen8_emit_media_objects_spin and
> gen8_spin_render_flush.
>
gen8_spin_render_flush does not exist in this patch. You've also left
gen8_spin_curbe_buffer_data, gen8_emit_vfe_state_spin and
gen8lp_emit_media_objects_spin intact.
With the commit message fixed:
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Daniele
> v2: squashed patches 1 and 2 from v1
>
> Signed-off-by: Katarzyna Dec <katarzyna.dec at intel.com>
> Cc: Lukasz Kalamarz <lukasz.kalamarz at intel.com>
> Cc: Antonio Argenziano <antonio.argenziano at intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> ---
> lib/gpu_fill.c | 81 +++++++++++++
> lib/gpu_fill.h | 13 +++
> lib/media_spin.c | 347 +++----------------------------------------------------
> 3 files changed, 113 insertions(+), 328 deletions(-)
>
> diff --git a/lib/gpu_fill.c b/lib/gpu_fill.c
> index f05d4eca..f5fc61bb 100644
> --- a/lib/gpu_fill.c
> +++ b/lib/gpu_fill.c
> @@ -351,6 +351,20 @@ gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch,
> OUT_BATCH(0xffffffff);
> }
>
> +uint32_t
> +gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
> + uint32_t iters)
> +{
> + uint32_t *curbe_buffer;
> + uint32_t offset;
> +
> + curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64);
> + offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
> + *curbe_buffer = iters;
> +
> + return offset;
> +}
> +
> uint32_t
> gen8_fill_surface_state(struct intel_batchbuffer *batch,
> struct igt_buf *buf,
> @@ -525,6 +539,30 @@ gen8_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch)
> OUT_BATCH(0);
> }
>
> +void
> +gen8_emit_vfe_state_spin(struct intel_batchbuffer *batch)
> +{
> + OUT_BATCH(GEN8_MEDIA_VFE_STATE | (9 - 2));
> +
> + /* scratch buffer */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +
> + /* number of threads & urb entries */
> + OUT_BATCH(2 << 8);
> +
> + OUT_BATCH(0);
> +
> + /* urb entry size & curbe size */
> + OUT_BATCH(2 << 16 |
> + 2);
> +
> + /* scoreboard */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +}
> +
> void
> gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch,
> unsigned x, unsigned y,
> @@ -585,6 +623,49 @@ gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch,
> OUT_BATCH(0xffffffff);
> }
>
> +void
> +gen8_emit_media_objects_spin(struct intel_batchbuffer *batch)
> +{
> + OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2));
> +
> + /* interface descriptor offset */
> + OUT_BATCH(0);
> +
> + /* without indirect data */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +
> + /* scoreboard */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +
> + /* inline data (xoffset, yoffset) */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> + gen8_emit_media_state_flush(batch);
> +}
> +
> +void
> +gen8lp_emit_media_objects_spin(struct intel_batchbuffer *batch)
> +{
> + OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2));
> +
> + /* interface descriptor offset */
> + OUT_BATCH(0);
> +
> + /* without indirect data */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +
> + /* scoreboard */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +
> + /* inline data (xoffset, yoffset) */
> + OUT_BATCH(0);
> + OUT_BATCH(0);
> +}
> +
> void
> gen9_emit_state_base_address(struct intel_batchbuffer *batch)
> {
> diff --git a/lib/gpu_fill.h b/lib/gpu_fill.h
> index 067d4987..5335fe3f 100644
> --- a/lib/gpu_fill.h
> +++ b/lib/gpu_fill.h
> @@ -88,6 +88,10 @@ gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch,
> unsigned x, unsigned y,
> unsigned width, unsigned height);
>
> +uint32_t
> +gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
> + uint32_t iters);
> +
> uint32_t
> gen8_fill_surface_state(struct intel_batchbuffer *batch,
> struct igt_buf *buf,
> @@ -109,11 +113,20 @@ gen8_emit_vfe_state(struct intel_batchbuffer *batch);
> void
> gen8_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch);
>
> +void
> +gen8_emit_vfe_state_spin(struct intel_batchbuffer *batch);
> +
> void
> gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch,
> unsigned x, unsigned y,
> unsigned width, unsigned height);
>
> +void
> +gen8_emit_media_objects_spin(struct intel_batchbuffer *batch);
> +
> +void
> +gen8lp_emit_media_objects_spin(struct intel_batchbuffer *batch);
> +
> void
> gen9_emit_state_base_address(struct intel_batchbuffer *batch);
>
> diff --git a/lib/media_spin.c b/lib/media_spin.c
> index d9e058b1..16ea8483 100644
> --- a/lib/media_spin.c
> +++ b/lib/media_spin.c
> @@ -31,6 +31,7 @@
> #include "intel_batchbuffer.h"
> #include "gen8_media.h"
> #include "media_spin.h"
> +#include "gpu_fill.h"
>
> static const uint32_t spin_kernel[][4] = {
> { 0x00600001, 0x20800208, 0x008d0000, 0x00000000 }, /* mov (8)r4.0<1>:ud r0.0<8;8;1>:ud */
> @@ -45,316 +46,6 @@ static const uint32_t spin_kernel[][4] = {
> { 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 }, /* send.ts (16)null<1> r112<0;1;0>:d 0x82000010 */
> };
>
> -static void
> -gen8_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
> -{
> - int ret;
> -
> - ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
> - if (ret == 0)
> - ret = drm_intel_gem_bo_context_exec(batch->bo, NULL,
> - batch_end, 0);
> - igt_assert_eq(ret, 0);
> -}
> -
> -static uint32_t
> -gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
> - uint32_t iters)
> -{
> - uint32_t *curbe_buffer;
> - uint32_t offset;
> -
> - curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64);
> - offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
> - *curbe_buffer = iters;
> -
> - return offset;
> -}
> -
> -static uint32_t
> -gen8_spin_surface_state(struct intel_batchbuffer *batch,
> - struct igt_buf *buf,
> - uint32_t format,
> - int is_dst)
> -{
> - struct gen8_surface_state *ss;
> - uint32_t write_domain, read_domain, offset;
> - int ret;
> -
> - if (is_dst) {
> - write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
> - } else {
> - write_domain = 0;
> - read_domain = I915_GEM_DOMAIN_SAMPLER;
> - }
> -
> - ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> - offset = intel_batchbuffer_subdata_offset(batch, ss);
> -
> - ss->ss0.surface_type = GEN8_SURFACE_2D;
> - ss->ss0.surface_format = format;
> - ss->ss0.render_cache_read_write = 1;
> - ss->ss0.vertical_alignment = 1; /* align 4 */
> - ss->ss0.horizontal_alignment = 1; /* align 4 */
> -
> - if (buf->tiling == I915_TILING_X)
> - ss->ss0.tiled_mode = 2;
> - else if (buf->tiling == I915_TILING_Y)
> - ss->ss0.tiled_mode = 3;
> -
> - ss->ss8.base_addr = buf->bo->offset;
> -
> - ret = drm_intel_bo_emit_reloc(batch->bo,
> - intel_batchbuffer_subdata_offset(batch, ss) + 8 * 4,
> - buf->bo, 0,
> - read_domain, write_domain);
> - igt_assert_eq(ret, 0);
> -
> - ss->ss2.height = igt_buf_height(buf) - 1;
> - ss->ss2.width = igt_buf_width(buf) - 1;
> - ss->ss3.pitch = buf->stride - 1;
> -
> - ss->ss7.shader_chanel_select_r = 4;
> - ss->ss7.shader_chanel_select_g = 5;
> - ss->ss7.shader_chanel_select_b = 6;
> - ss->ss7.shader_chanel_select_a = 7;
> -
> - return offset;
> -}
> -
> -static uint32_t
> -gen8_spin_binding_table(struct intel_batchbuffer *batch,
> - struct igt_buf *dst)
> -{
> - uint32_t *binding_table, offset;
> -
> - binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
> - offset = intel_batchbuffer_subdata_offset(batch, binding_table);
> -
> - binding_table[0] = gen8_spin_surface_state(batch, dst,
> - GEN8_SURFACEFORMAT_R8_UNORM, 1);
> -
> - return offset;
> -}
> -
> -static uint32_t
> -gen8_spin_media_kernel(struct intel_batchbuffer *batch,
> - const uint32_t kernel[][4],
> - size_t size)
> -{
> - uint32_t offset;
> -
> - offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
> -
> - return offset;
> -}
> -
> -static uint32_t
> -gen8_spin_interface_descriptor(struct intel_batchbuffer *batch,
> - struct igt_buf *dst)
> -{
> - struct gen8_interface_descriptor_data *idd;
> - uint32_t offset;
> - uint32_t binding_table_offset, kernel_offset;
> -
> - binding_table_offset = gen8_spin_binding_table(batch, dst);
> - kernel_offset = gen8_spin_media_kernel(batch, spin_kernel,
> - sizeof(spin_kernel));
> -
> - idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
> - offset = intel_batchbuffer_subdata_offset(batch, idd);
> -
> - idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
> -
> - idd->desc2.single_program_flow = 1;
> - idd->desc2.floating_point_mode = GEN8_FLOATING_POINT_IEEE_754;
> -
> - idd->desc3.sampler_count = 0; /* 0 samplers used */
> - idd->desc3.sampler_state_pointer = 0;
> -
> - idd->desc4.binding_table_entry_count = 0;
> - idd->desc4.binding_table_pointer = (binding_table_offset >> 5);
> -
> - idd->desc5.constant_urb_entry_read_offset = 0;
> - idd->desc5.constant_urb_entry_read_length = 1; /* grf 1 */
> -
> - return offset;
> -}
> -
> -static void
> -gen8_emit_state_base_address(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (16 - 2));
> -
> - /* general */
> - OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
> - OUT_BATCH(0);
> -
> - /* stateless data port */
> - OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
> -
> - /* surface */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY);
> -
> - /* dynamic */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION,
> - 0, BASE_ADDRESS_MODIFY);
> -
> - /* indirect */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* instruction */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
> -
> - /* general state buffer size */
> - OUT_BATCH(0xfffff000 | 1);
> - /* dynamic state buffer size */
> - OUT_BATCH(1 << 12 | 1);
> - /* indirect object buffer size */
> - OUT_BATCH(0xfffff000 | 1);
> - /* intruction buffer size, must set modify enable bit, otherwise it may result in GPU hang */
> - OUT_BATCH(1 << 12 | 1);
> -}
> -
> -static void
> -gen9_emit_state_base_address(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (19 - 2));
> -
> - /* general */
> - OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
> - OUT_BATCH(0);
> -
> - /* stateless data port */
> - OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
> -
> - /* surface */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY);
> -
> - /* dynamic */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION,
> - 0, BASE_ADDRESS_MODIFY);
> -
> - /* indirect */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* instruction */
> - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
> -
> - /* general state buffer size */
> - OUT_BATCH(0xfffff000 | 1);
> - /* dynamic state buffer size */
> - OUT_BATCH(1 << 12 | 1);
> - /* indirect object buffer size */
> - OUT_BATCH(0xfffff000 | 1);
> - /* intruction buffer size, must set modify enable bit, otherwise it may result in GPU hang */
> - OUT_BATCH(1 << 12 | 1);
> -
> - /* Bindless surface state base address */
> - OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
> - OUT_BATCH(0);
> - OUT_BATCH(0xfffff000);
> -}
> -
> -static void
> -gen8_emit_vfe_state(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_MEDIA_VFE_STATE | (9 - 2));
> -
> - /* scratch buffer */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* number of threads & urb entries */
> - OUT_BATCH(2 << 8);
> -
> - OUT_BATCH(0);
> -
> - /* urb entry size & curbe size */
> - OUT_BATCH(2 << 16 |
> - 2);
> -
> - /* scoreboard */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -}
> -
> -static void
> -gen8_emit_curbe_load(struct intel_batchbuffer *batch, uint32_t curbe_buffer)
> -{
> - OUT_BATCH(GEN8_MEDIA_CURBE_LOAD | (4 - 2));
> - OUT_BATCH(0);
> - /* curbe total data length */
> - OUT_BATCH(64);
> - /* curbe data start address, is relative to the dynamics base address */
> - OUT_BATCH(curbe_buffer);
> -}
> -
> -static void
> -gen8_emit_interface_descriptor_load(struct intel_batchbuffer *batch,
> - uint32_t interface_descriptor)
> -{
> - OUT_BATCH(GEN8_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
> - OUT_BATCH(0);
> - /* interface descriptor data length */
> - OUT_BATCH(sizeof(struct gen8_interface_descriptor_data));
> - /* interface descriptor address, is relative to the dynamics base address */
> - OUT_BATCH(interface_descriptor);
> -}
> -
> -static void
> -gen8_emit_media_state_flush(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_MEDIA_STATE_FLUSH | (2 - 2));
> - OUT_BATCH(0);
> -}
> -
> -static void
> -gen8_emit_media_objects(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2));
> -
> - /* interface descriptor offset */
> - OUT_BATCH(0);
> -
> - /* without indirect data */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* scoreboard */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* inline data (xoffset, yoffset) */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> - gen8_emit_media_state_flush(batch);
> -}
> -
> -static void
> -gen8lp_emit_media_objects(struct intel_batchbuffer *batch)
> -{
> - OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2));
> -
> - /* interface descriptor offset */
> - OUT_BATCH(0);
> -
> - /* without indirect data */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* scoreboard */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -
> - /* inline data (xoffset, yoffset) */
> - OUT_BATCH(0);
> - OUT_BATCH(0);
> -}
> -
> /*
> * This sets up the media pipeline,
> *
> @@ -390,7 +81,7 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch,
> batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>
> curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins);
> - interface_descriptor = gen8_spin_interface_descriptor(batch, dst);
> + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel));
> igt_assert(batch->ptr < &batch->buffer[4095]);
>
> /* media pipeline */
> @@ -398,20 +89,20 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch,
> OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
> gen8_emit_state_base_address(batch);
>
> - gen8_emit_vfe_state(batch);
> + gen8_emit_vfe_state_spin(batch);
>
> - gen8_emit_curbe_load(batch, curbe_buffer);
> + gen7_emit_curbe_load(batch, curbe_buffer);
>
> - gen8_emit_interface_descriptor_load(batch, interface_descriptor);
> + gen7_emit_interface_descriptor_load(batch, interface_descriptor);
>
> - gen8_emit_media_objects(batch);
> + gen8_emit_media_objects_spin(batch);
>
> OUT_BATCH(MI_BATCH_BUFFER_END);
>
> batch_end = intel_batchbuffer_align(batch, 8);
> igt_assert(batch_end < BATCH_STATE_SPLIT);
>
> - gen8_render_flush(batch, batch_end);
> + gen7_render_flush(batch, batch_end);
> intel_batchbuffer_reset(batch);
> }
>
> @@ -428,7 +119,7 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch,
> batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>
> curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins);
> - interface_descriptor = gen8_spin_interface_descriptor(batch, dst);
> + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel));
> igt_assert(batch->ptr < &batch->buffer[4095]);
>
> /* media pipeline */
> @@ -436,20 +127,20 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch,
> OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
> gen8_emit_state_base_address(batch);
>
> - gen8_emit_vfe_state(batch);
> + gen8_emit_vfe_state_spin(batch);
>
> - gen8_emit_curbe_load(batch, curbe_buffer);
> + gen7_emit_curbe_load(batch, curbe_buffer);
>
> - gen8_emit_interface_descriptor_load(batch, interface_descriptor);
> + gen7_emit_interface_descriptor_load(batch, interface_descriptor);
>
> - gen8lp_emit_media_objects(batch);
> + gen8lp_emit_media_objects_spin(batch);
>
> OUT_BATCH(MI_BATCH_BUFFER_END);
>
> batch_end = intel_batchbuffer_align(batch, 8);
> igt_assert(batch_end < BATCH_STATE_SPLIT);
>
> - gen8_render_flush(batch, batch_end);
> + gen7_render_flush(batch, batch_end);
> intel_batchbuffer_reset(batch);
> }
>
> @@ -466,7 +157,7 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
> batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>
> curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins);
> - interface_descriptor = gen8_spin_interface_descriptor(batch, dst);
> + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel));
> igt_assert(batch->ptr < &batch->buffer[4095]);
>
> /* media pipeline */
> @@ -479,13 +170,13 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
> GEN9_FORCE_MEDIA_AWAKE_MASK);
> gen9_emit_state_base_address(batch);
>
> - gen8_emit_vfe_state(batch);
> + gen8_emit_vfe_state_spin(batch);
>
> - gen8_emit_curbe_load(batch, curbe_buffer);
> + gen7_emit_curbe_load(batch, curbe_buffer);
>
> - gen8_emit_interface_descriptor_load(batch, interface_descriptor);
> + gen7_emit_interface_descriptor_load(batch, interface_descriptor);
>
> - gen8_emit_media_objects(batch);
> + gen8_emit_media_objects_spin(batch);
>
> OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA |
> GEN9_FORCE_MEDIA_AWAKE_DISABLE |
> @@ -499,6 +190,6 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
> batch_end = intel_batchbuffer_align(batch, 8);
> igt_assert(batch_end < BATCH_STATE_SPLIT);
>
> - gen8_render_flush(batch, batch_end);
> + gen7_render_flush(batch, batch_end);
> intel_batchbuffer_reset(batch);
> }
>
More information about the igt-dev
mailing list