[igt-dev] [PATCH i-g-t v28 17/36] lib/intel_batchbuffer: Add tracking intel_buf to intel_bb
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Mon Mar 22 12:37:31 UTC 2021
From now on intel_bb tracks the intel_bufs added to (and removed from) it.
We are now safe regardless of the order in which the intel_buf close/destroy
and intel_bb destroy paths run.
When an intel_buf that was previously added to an intel_bb is closed or
destroyed first, it calls the code which removes it from that intel_bb.
In the intel_bb destroy path we walk all tracked intel_bufs and clear their
tracking information and buffer offset (it is set to INTEL_BUF_INVALID_ADDRESS).
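
A minimal sketch (not part of the patch) of the ordering this makes safe,
assuming the IGT helpers used elsewhere in the series:

    int i915 = drm_open_driver(DRIVER_INTEL);
    struct buf_ops *bops = buf_ops_create(i915);
    struct intel_bb *ibb = intel_bb_create(i915, 4096);
    struct intel_buf *buf = intel_buf_create(bops, 512, 512, 32, 0,
                                             I915_TILING_NONE,
                                             I915_COMPRESSION_NONE);

    intel_bb_add_intel_buf(ibb, buf, false);

    /* Closing/destroying the tracked buf first detaches it from ibb... */
    intel_buf_destroy(buf);
    /* ...so destroying the bb afterwards no longer touches a stale buf. */
    intel_bb_destroy(ibb);
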
The reset path is handled as follows (a short sketch follows the list):
- intel_bb_reset(ibb, false) - just clean the objects array, leaving the
  cache / allocator state intact.
- intel_bb_reset(ibb, true) - purge the cache and detach the intel_bufs
  from the intel_bb (releasing their offsets in the allocator).
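
Sketch of both flavours (continuing the example above; names as in the
IGT API):

    /* Cache/allocator state kept: buf->addr.offset remains valid */
    intel_bb_reset(ibb, false);

    /* Cache purged: buf is detached from ibb, its offset is released
     * in the allocator and set to INTEL_BUF_INVALID_ADDRESS
     */
    intel_bb_reset(ibb, true);
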
Remove the intel_bb_object_offset_to_buf() function, as tracked intel_bufs
get their offsets updated (or checked against the allocator) after execbuf.

Adjust api_intel_bb to the intel-bb changes.
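
For illustration only (not part of the patch), the caller-visible difference,
assuming a buf already added to ibb and a batch terminated with a BBE:

    intel_bb_emit_bbe(ibb);
    intel_bb_exec(ibb, intel_bb_offset(ibb),
                  I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC, false);

    /* Previously callers copied the exec offset back by hand: */
    /*   intel_bb_object_offset_to_buf(ibb, buf); */

    /* Now tracked bufs are updated in update_offsets() after execbuf: */
    igt_assert(buf->addr.offset != INTEL_BUF_INVALID_ADDRESS);
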
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
lib/intel_batchbuffer.c | 186 ++++++++++++++++++++++++++++----------
lib/intel_batchbuffer.h | 29 +++---
lib/intel_bufops.c | 13 ++-
lib/intel_bufops.h | 6 ++
lib/media_spin.c | 2 -
tests/i915/api_intel_bb.c | 7 --
6 files changed, 169 insertions(+), 74 deletions(-)
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 045f3f157..1b234b14d 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1261,6 +1261,8 @@ static inline uint64_t __intel_bb_get_offset(struct intel_bb *ibb,
* If we do reset without purging caches we use addresses from intel-bb cache
* during execbuf objects construction.
*
+ * If we reset with purging caches, the allocator entries are freed as well.
+ *
* Returns:
*
* Pointer the intel_bb, asserts on failure.
@@ -1303,6 +1305,7 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs,
ibb->size = size;
ibb->alignment = 4096;
ibb->ctx = ctx;
+ ibb->vm_id = 0;
ibb->batch = calloc(1, size);
igt_assert(ibb->batch);
ibb->ptr = ibb->batch;
@@ -1317,6 +1320,8 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs,
false);
ibb->batch_offset = object->offset;
+ IGT_INIT_LIST_HEAD(&ibb->intel_bufs);
+
ibb->refcount = 1;
return ibb;
@@ -1508,6 +1513,22 @@ static void __intel_bb_destroy_cache(struct intel_bb *ibb)
ibb->root = NULL;
}
+static void __intel_bb_detach_intel_bufs(struct intel_bb *ibb)
+{
+ struct intel_buf *entry, *tmp;
+
+ igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
+ intel_bb_detach_intel_buf(ibb, entry);
+}
+
+static void __intel_bb_remove_intel_bufs(struct intel_bb *ibb)
+{
+ struct intel_buf *entry, *tmp;
+
+ igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
+ intel_bb_remove_intel_buf(ibb, entry);
+}
+
/**
* intel_bb_destroy:
* @ibb: pointer to intel_bb
@@ -1521,6 +1542,7 @@ void intel_bb_destroy(struct intel_bb *ibb)
ibb->refcount--;
igt_assert_f(ibb->refcount == 0, "Trying to destroy referenced bb!");
+ __intel_bb_remove_intel_bufs(ibb);
__intel_bb_destroy_relocations(ibb);
__intel_bb_destroy_objects(ibb);
__intel_bb_destroy_cache(ibb);
@@ -1544,6 +1566,10 @@ void intel_bb_destroy(struct intel_bb *ibb)
* @purge_objects_cache: if true destroy internal execobj and relocs + cache
*
* Recreate batch bo when there's no additional reference.
+ *
+ * When purge_objects_cache == true we destroy the cache and remove the
+ * intel_bufs from the intel-bb tracking list. Removing intel_bufs releases
+ * their addresses in the allocator.
*/
void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
@@ -1569,8 +1595,10 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
__intel_bb_destroy_objects(ibb);
__reallocate_objects(ibb);
- if (purge_objects_cache)
+ if (purge_objects_cache) {
+ __intel_bb_remove_intel_bufs(ibb);
__intel_bb_destroy_cache(ibb);
+ }
/*
* When we use allocators we're in no-reloc mode so we have to free
@@ -1621,6 +1649,50 @@ int intel_bb_sync(struct intel_bb *ibb)
return ret;
}
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+ uint32_t vm_id)
+{
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ uint64_t prev_allocator = ibb->allocator_handle;
+ bool closed = false;
+
+ if (ibb->vm_id == vm_id) {
+ igt_debug("Skipping to assign same vm_id: %u\n", vm_id);
+ return 0;
+ }
+
+ /* Cannot switch if someone else holds a reference to the bb */
+ igt_assert(ibb->refcount == 1);
+
+ /* Detach intel_bufs and remove bb handle */
+ __intel_bb_detach_intel_bufs(ibb);
+ intel_bb_remove_object(ibb, ibb->handle, ibb->batch_offset, ibb->size);
+
+ /* Cache + objects are not valid after change anymore */
+ __intel_bb_destroy_objects(ibb);
+ __intel_bb_destroy_cache(ibb);
+
+ /* Attach new allocator */
+ ibb->allocator_handle = allocator;
+
+ /* Setparam */
+ ibb->vm_id = vm_id;
+
+ /* Skip set param, we likely return to default vm */
+ if (vm_id) {
+ arg.ctx_id = ibb->ctx;
+ arg.value = vm_id;
+ gem_context_set_param(ibb->i915, &arg);
+ }
+
+ /* Recreate bb */
+ intel_bb_reset(ibb, false);
+
+ return closed ? 0 : prev_allocator;
+}
+
/*
* intel_bb_print:
* @ibb: pointer to intel_bb
@@ -1875,22 +1947,13 @@ intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint64_t size,
object->offset = offset;
- /* Limit current offset to gtt size */
- if (offset != INTEL_BUF_INVALID_ADDRESS) {
- object->offset = CANONICAL(offset & (ibb->gtt_size - 1));
- } else {
- object->offset = __intel_bb_get_offset(ibb,
- handle, size,
- object->alignment);
- }
-
if (write)
object->flags |= EXEC_OBJECT_WRITE;
if (ibb->supports_48b_address)
object->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
- if (ibb->uses_full_ppgtt && ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+ if (ibb->uses_full_ppgtt && !ibb->enforce_relocs)
object->flags |= EXEC_OBJECT_PINNED;
return object;
@@ -1927,6 +1990,9 @@ __intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf,
{
struct drm_i915_gem_exec_object2 *obj;
+ igt_assert(ibb);
+ igt_assert(buf);
+ igt_assert(!buf->ibb || buf->ibb == ibb);
igt_assert(ALIGN(alignment, 4096) == alignment);
if (!alignment) {
@@ -1951,6 +2017,13 @@ __intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf,
if (!ibb->enforce_relocs)
obj->alignment = alignment;
+ if (igt_list_empty(&buf->link)) {
+ igt_list_add_tail(&buf->link, &ibb->intel_bufs);
+ buf->ibb = ibb;
+ } else {
+ igt_assert(buf->ibb == ibb);
+ }
+
return obj;
}
@@ -1967,18 +2040,53 @@ intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *bu
return __intel_bb_add_intel_buf(ibb, buf, alignment, write);
}
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
+{
+ igt_assert(ibb);
+ igt_assert(buf);
+ igt_assert(!buf->ibb || buf->ibb == ibb);
+
+ if (!igt_list_empty(&buf->link)) {
+ buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+ buf->ibb = NULL;
+ igt_list_del_init(&buf->link);
+ }
+}
+
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
{
- bool removed = intel_bb_remove_object(ibb, buf->handle,
- buf->addr.offset,
- intel_buf_bo_size(buf));
+ bool removed;
- if (removed)
+ igt_assert(ibb);
+ igt_assert(buf);
+ igt_assert(!buf->ibb || buf->ibb == ibb);
+
+ if (igt_list_empty(&buf->link))
+ return false;
+
+ removed = intel_bb_remove_object(ibb, buf->handle,
+ buf->addr.offset,
+ intel_buf_bo_size(buf));
+ if (removed) {
buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+ buf->ibb = NULL;
+ igt_list_del_init(&buf->link);
+ }
return removed;
}
+void intel_bb_print_intel_bufs(struct intel_bb *ibb)
+{
+ struct intel_buf *entry;
+
+ igt_list_for_each_entry(entry, &ibb->intel_bufs, link) {
+ igt_info("handle: %u, ibb: %p, offset: %lx\n",
+ entry->handle, entry->ibb,
+ (long) entry->addr.offset);
+ }
+}
+
struct drm_i915_gem_exec_object2 *
intel_bb_find_object(struct intel_bb *ibb, uint32_t handle)
{
@@ -2361,6 +2469,7 @@ static void update_offsets(struct intel_bb *ibb,
struct drm_i915_gem_exec_object2 *objects)
{
struct drm_i915_gem_exec_object2 *object;
+ struct intel_buf *entry;
uint32_t i;
for (i = 0; i < ibb->num_objects; i++) {
@@ -2372,11 +2481,23 @@ static void update_offsets(struct intel_bb *ibb,
if (i == 0)
ibb->batch_offset = object->offset;
}
+
+ igt_list_for_each_entry(entry, &ibb->intel_bufs, link) {
+ object = intel_bb_find_object(ibb, entry->handle);
+ igt_assert(object);
+
+ if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+ igt_assert(object->offset == entry->addr.offset);
+ else
+ entry->addr.offset = object->offset;
+
+ entry->addr.ctx = ibb->ctx;
+ }
}
#define LINELEN 76
/*
- * @__intel_bb_exec:
+ * __intel_bb_exec:
* @ibb: pointer to intel_bb
* @end_offset: offset of the last instruction in the bb
* @flags: flags passed directly to execbuf
@@ -2416,6 +2537,9 @@ static int __intel_bb_exec(struct intel_bb *ibb, uint32_t end_offset,
if (ibb->dump_base64)
intel_bb_dump_base64(ibb, LINELEN);
+ /* For debugging on CI, remove in final series */
+ intel_bb_dump_execbuf(ibb, &execbuf);
+
ret = __gem_execbuf_wr(ibb->i915, &execbuf);
if (ret) {
intel_bb_dump_execbuf(ibb, &execbuf);
@@ -2493,36 +2617,6 @@ uint64_t intel_bb_get_object_offset(struct intel_bb *ibb, uint32_t handle)
return (*found)->offset;
}
-/**
- * intel_bb_object_offset_to_buf:
- * @ibb: pointer to intel_bb
- * @buf: buffer we want to store last exec offset and context id
- *
- * Copy object offset used in the batch to intel_buf to allow caller prepare
- * other batch likely without relocations.
- */
-bool intel_bb_object_offset_to_buf(struct intel_bb *ibb, struct intel_buf *buf)
-{
- struct drm_i915_gem_exec_object2 object = { .handle = buf->handle };
- struct drm_i915_gem_exec_object2 **found;
-
- igt_assert(ibb);
- igt_assert(buf);
-
- found = tfind((void *)&object, &ibb->root, __compare_objects);
- if (!found) {
- buf->addr.offset = 0;
- buf->addr.ctx = ibb->ctx;
-
- return false;
- }
-
- buf->addr.offset = (*found)->offset & (ibb->gtt_size - 1);
- buf->addr.ctx = ibb->ctx;
-
- return true;
-}
-
/*
* intel_bb_emit_bbe:
* @ibb: batchbuffer
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 702052d22..6f148713b 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -6,6 +6,7 @@
#include <i915_drm.h>
#include "igt_core.h"
+#include "igt_list.h"
#include "intel_reg.h"
#include "drmtest.h"
#include "intel_allocator.h"
@@ -464,6 +465,7 @@ struct intel_bb {
bool uses_full_ppgtt;
uint32_t ctx;
+ uint32_t vm_id;
/* Cache */
void *root;
@@ -481,6 +483,9 @@ struct intel_bb {
uint32_t num_relocs;
uint32_t allocated_relocs;
+ /* Tracked intel_bufs */
+ struct igt_list_head intel_bufs;
+
/*
* BO recreate in reset path only when refcount == 0
* Currently we don't need to use atomics because intel_bb
@@ -517,29 +522,15 @@ static inline void intel_bb_unref(struct intel_bb *ibb)
void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache);
int intel_bb_sync(struct intel_bb *ibb);
+
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+ uint32_t vm_id);
+
void intel_bb_print(struct intel_bb *ibb);
void intel_bb_dump(struct intel_bb *ibb, const char *filename);
void intel_bb_set_debug(struct intel_bb *ibb, bool debug);
void intel_bb_set_dump_base64(struct intel_bb *ibb, bool dump);
-/*
-static inline uint64_t
-intel_bb_set_default_object_alignment(struct intel_bb *ibb, uint64_t alignment)
-{
- uint64_t old = ibb->alignment;
-
- ibb->alignment = alignment;
-
- return old;
-}
-
-static inline uint64_t
-intel_bb_get_default_object_alignment(struct intel_bb *ibb)
-{
- return ibb->alignment;
-}
-*/
-
static inline uint32_t intel_bb_offset(struct intel_bb *ibb)
{
return (uint32_t) ((uint8_t *) ibb->ptr - (uint8_t *) ibb->batch);
@@ -597,7 +588,9 @@ intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write);
struct drm_i915_gem_exec_object2 *
intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
uint64_t alignment, bool write);
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
+void intel_bb_print_intel_bufs(struct intel_bb *ibb);
struct drm_i915_gem_exec_object2 *
intel_bb_find_object(struct intel_bb *ibb, uint32_t handle);
diff --git a/lib/intel_bufops.c b/lib/intel_bufops.c
index 00be2bd04..3c1cca0cf 100644
--- a/lib/intel_bufops.c
+++ b/lib/intel_bufops.c
@@ -727,6 +727,7 @@ static void __intel_buf_init(struct buf_ops *bops,
buf->bops = bops;
buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+ IGT_INIT_LIST_HEAD(&buf->link);
if (compression) {
int aux_width, aux_height;
@@ -827,13 +828,23 @@ void intel_buf_init(struct buf_ops *bops,
*
* Function closes gem BO inside intel_buf if bo is owned by intel_buf.
* For handle passed from the caller intel_buf doesn't take ownership and
- * doesn't close it in close()/destroy() paths.
+ * doesn't close it in the close()/destroy() paths. When the intel_buf was
+ * previously added to an intel_bb (via intel_bb_add_intel_buf()) it is
+ * tracked there and must be removed from the intel_bb's internal structures.
*/
void intel_buf_close(struct buf_ops *bops, struct intel_buf *buf)
{
igt_assert(bops);
igt_assert(buf);
+ /* If the buf is tracked by some intel_bb, make sure it is removed from it */
+ if (buf->ibb) {
+ intel_bb_remove_intel_buf(buf->ibb, buf);
+ buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+ buf->ibb = NULL;
+ IGT_INIT_LIST_HEAD(&buf->link);
+ }
+
if (buf->is_owner)
gem_close(bops->fd, buf->handle);
}
diff --git a/lib/intel_bufops.h b/lib/intel_bufops.h
index 54480bff6..1a3d86925 100644
--- a/lib/intel_bufops.h
+++ b/lib/intel_bufops.h
@@ -2,6 +2,7 @@
#define __INTEL_BUFOPS_H__
#include <stdint.h>
+#include "igt_list.h"
#include "igt_aux.h"
#include "intel_batchbuffer.h"
@@ -13,6 +14,7 @@ struct buf_ops;
struct intel_buf {
struct buf_ops *bops;
+
bool is_owner;
uint32_t handle;
uint64_t size;
@@ -40,6 +42,10 @@ struct intel_buf {
uint32_t ctx;
} addr;
+ /* Tracking */
+ struct intel_bb *ibb;
+ struct igt_list_head link;
+
/* CPU mapping */
uint32_t *ptr;
bool cpu_write;
diff --git a/lib/media_spin.c b/lib/media_spin.c
index 5da469a52..d2345d153 100644
--- a/lib/media_spin.c
+++ b/lib/media_spin.c
@@ -132,7 +132,6 @@ gen8_media_spinfunc(int i915, struct intel_buf *buf, uint32_t spins)
intel_bb_exec(ibb, intel_bb_offset(ibb),
I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC, false);
- intel_bb_object_offset_to_buf(ibb, buf);
intel_bb_destroy(ibb);
}
@@ -186,6 +185,5 @@ gen9_media_spinfunc(int i915, struct intel_buf *buf, uint32_t spins)
intel_bb_exec(ibb, intel_bb_offset(ibb),
I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC, false);
- intel_bb_object_offset_to_buf(ibb, buf);
intel_bb_destroy(ibb);
}
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index c6c943506..77dfb6854 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -828,9 +828,6 @@ static void offset_control(struct buf_ops *bops)
print_buf(dst2, "dst2");
}
- igt_assert(intel_bb_object_offset_to_buf(ibb, src) == true);
- igt_assert(intel_bb_object_offset_to_buf(ibb, dst1) == true);
- igt_assert(intel_bb_object_offset_to_buf(ibb, dst2) == true);
poff_src = src->addr.offset;
poff_dst1 = dst1->addr.offset;
poff_dst2 = dst2->addr.offset;
@@ -853,10 +850,6 @@ static void offset_control(struct buf_ops *bops)
I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC, false);
intel_bb_sync(ibb);
- igt_assert(intel_bb_object_offset_to_buf(ibb, src) == true);
- igt_assert(intel_bb_object_offset_to_buf(ibb, dst1) == true);
- igt_assert(intel_bb_object_offset_to_buf(ibb, dst2) == true);
- igt_assert(intel_bb_object_offset_to_buf(ibb, dst3) == true);
igt_assert(poff_src == src->addr.offset);
igt_assert(poff_dst1 == dst1->addr.offset);
igt_assert(poff_dst2 == dst2->addr.offset);
--
2.26.0