[Libva] [PATCH 3/5] intel_batchbuffer: Add utility function for supporting 48-bit address relocations in Gen8+

Zhao Yakui yakui.zhao at intel.com
Mon Oct 12 01:40:20 PDT 2015


On 10/12/2015 04:37 PM, sreerenj.balachandran at intel.com wrote:
> From: Sreerenj Balachandran <sreerenj.balachandran at intel.com>
>
> There are Gen8+ instructions which require a 48-bit address relocation
> (e.g. the Surface State Address in the STATE_BASE_ADDRESS instruction). Add a batchbuffer
> utility function for this relocation, based on Mesa's batch buffer implementation.

This looks good to me.

Add: Reviewed-by: Zhao Yakui <yakui.zhao at intel.com>

Thanks
    Yakui
>
> Signed-off-by: Sreerenj Balachandran <sreerenj.balachandran at intel.com>
> ---
>   src/intel_batchbuffer.c | 18 ++++++++++++++++++
>   src/intel_batchbuffer.h | 12 ++++++++++++
>   2 files changed, 30 insertions(+)
>
> diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
> index c5604b8..a145e05 100644
> --- a/src/intel_batchbuffer.c
> +++ b/src/intel_batchbuffer.c
> @@ -168,6 +168,24 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo,
>   }
>
>   void
> +intel_batchbuffer_emit_reloc64(struct intel_batchbuffer *batch, dri_bo *bo,
> +                                uint32_t read_domains, uint32_t write_domains,
> +                                uint32_t delta)
> +{
> +    assert(batch->ptr - batch->map < batch->size);
> +    dri_bo_emit_reloc(batch->buffer, read_domains, write_domains,
> +                      delta, batch->ptr - batch->map, bo);
> +
> +    /* Using the old buffer offset, write in what the right data would be, in
> +     * case the buffer doesn't move and we can short-circuit the relocation
> +     * processing in the kernel.
> +     */
> +    uint64_t offset = bo->offset64 + delta;
> +    intel_batchbuffer_emit_dword(batch, offset);
> +    intel_batchbuffer_emit_dword(batch, offset >> 32);
> +}
> +
> +void
>   intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
>                                      unsigned int size)
>   {
> diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
> index 41d359d..377e6ae 100644
> --- a/src/intel_batchbuffer.h
> +++ b/src/intel_batchbuffer.h
> @@ -40,6 +40,9 @@ void intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int
>   void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo,
>                                     uint32_t read_domains, uint32_t write_domains,
>                                     uint32_t delta);
> +void intel_batchbuffer_emit_reloc64(struct intel_batchbuffer *batch, dri_bo *bo,
> +                                  uint32_t read_domains, uint32_t write_domains,
> +                                  uint32_t delta);
>   void intel_batchbuffer_require_space(struct intel_batchbuffer *batch, unsigned int size);
>   void intel_batchbuffer_data(struct intel_batchbuffer *batch, void *data, unsigned int size);
>   void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
> @@ -78,6 +81,13 @@ void intel_batchbuffer_start_atomic_bcs_override(struct intel_batchbuffer *batch
>                                        delta);                            \
>       } while (0)
>
> +/* Handle 48-bit address relocations for Gen8+ */
> +#define __OUT_RELOC64(batch, bo, read_domains, write_domain, delta) do { \
> +         intel_batchbuffer_emit_reloc64(batch, bo,                       \
> +         read_domains, write_domain,                                     \
> +         delta);                                                         \
> +    } while (0)
> +
>   #define __ADVANCE_BATCH(batch) do {             \
>           intel_batchbuffer_advance_batch(batch); \
>       } while (0)
> @@ -98,6 +108,8 @@ void intel_batchbuffer_start_atomic_bcs_override(struct intel_batchbuffer *batch
>       __OUT_RELOC(batch, bo, read_domains, write_domain, delta)
>   #define OUT_BCS_RELOC(batch, bo, read_domains, write_domain, delta)     \
>       __OUT_RELOC(batch, bo, read_domains, write_domain, delta)
> +#define OUT_RELOC64(batch, bo, read_domains, write_domain, delta)       \
> +    __OUT_RELOC64(batch, bo, read_domains, write_domain, delta)
>
>   #define ADVANCE_BATCH(batch)            __ADVANCE_BATCH(batch)
>   #define ADVANCE_BLT_BATCH(batch)        __ADVANCE_BATCH(batch)
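
For reference, a minimal usage sketch of the new OUT_RELOC64 macro when programming STATE_BASE_ADDRESS on Gen8, where the Surface State Base Address is a 48-bit address and therefore needs a relocation that patches two consecutive dwords. This is illustrative only and not part of the patch: CMD_STATE_BASE_ADDRESS and BASE_ADDRESS_MODIFY stand in for the driver's existing opcode/flag names, and BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH are assumed to be the driver's usual batch macros.

/* Hypothetical sketch (not part of this patch): Gen8 STATE_BASE_ADDRESS is
 * 16 dwords; the Surface State Base Address occupies two of them and is
 * filled by a single OUT_RELOC64.
 */
static void
gen8_emit_state_base_address_example(struct intel_batchbuffer *batch,
                                     dri_bo *surface_state_bo)
{
    BEGIN_BATCH(batch, 16);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));

    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);  /* DW1-2: General State Base Address */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);                        /* DW3: Stateless Data Port MOCS */

    /* DW4-5: Surface State Base Address -- one relocation covering two
     * consecutive dwords, which is exactly what OUT_RELOC64 emits.
     */
    OUT_RELOC64(batch, surface_state_bo,
                I915_GEM_DOMAIN_INSTRUCTION, 0,
                BASE_ADDRESS_MODIFY);

    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);  /* DW6-7: Dynamic State Base Address */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);  /* DW8-9: Indirect Object Base Address */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);  /* DW10-11: Instruction Base Address */
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, 0);                        /* DW12: General State Buffer Size (unmodified) */
    OUT_BATCH(batch, 0);                        /* DW13: Dynamic State Buffer Size (unmodified) */
    OUT_BATCH(batch, 0);                        /* DW14: Indirect Object Buffer Size (unmodified) */
    OUT_BATCH(batch, 0);                        /* DW15: Instruction Buffer Size (unmodified) */

    ADVANCE_BATCH(batch);
}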
