<div dir="ltr">Reviewed-by: Jason Ekstrand <<a href="mailto:jason@jlekstrand.net">jason@jlekstrand.net</a>><br></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Wed, Jan 23, 2019 at 6:41 PM Rafael Antognolli <<a href="mailto:rafael.antognolli@intel.com">rafael.antognolli@intel.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Accessing bo->map and then pool->center_bo_offset without a lock is<br>
racy. One way of avoiding such a race condition is to store the bo->map +<br>
center_bo_offset into pool->map at the time the block pool is growing,<br>
which happens within a lock.<br>
<br>
v2: Only set pool->map if not using softpin (Jason).<br>
v3: Move things around and only update center_bo_offset if not using<br>
softpin too (Jason).<br>
<br>
Cc: Jason Ekstrand <<a href="mailto:jason@jlekstrand.net" target="_blank">jason@jlekstrand.net</a>><br>
Reported-by: Ian Romanick <<a href="mailto:idr@freedesktop.org" target="_blank">idr@freedesktop.org</a>><br>
Bugzilla: <a href="https://bugs.freedesktop.org/show_bug.cgi?id=109442" rel="noreferrer" target="_blank">https://bugs.freedesktop.org/show_bug.cgi?id=109442</a><br>
Fixes: fc3f58832015cbb177179e7f3420d3611479b4a9<br>
---<br>
src/intel/vulkan/anv_allocator.c | 20 ++++++++++++++------<br>
src/intel/vulkan/anv_private.h | 13 +++++++++++++<br>
2 files changed, 27 insertions(+), 6 deletions(-)<br>
<br>
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c<br>
index 89f26789c85..006175c8c65 100644<br>
--- a/src/intel/vulkan/anv_allocator.c<br>
+++ b/src/intel/vulkan/anv_allocator.c<br>
@@ -436,7 +436,9 @@ anv_block_pool_init(struct anv_block_pool *pool,<br>
pool->bo_flags = bo_flags;<br>
pool->nbos = 0;<br>
pool->size = 0;<br>
+ pool->center_bo_offset = 0;<br>
pool->start_address = gen_canonical_address(start_address);<br>
+ pool->map = NULL;<br>
<br>
/* This pointer will always point to the first BO in the list */<br>
pool->bo = &pool->bos[0];<br>
@@ -536,6 +538,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,<br>
if (map == MAP_FAILED)<br>
return vk_errorf(pool->device->instance, pool->device,<br>
VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m");<br>
+ assert(center_bo_offset == 0);<br>
} else {<br>
/* Just leak the old map until we destroy the pool. We can't munmap it<br>
* without races or imposing locking on the block allocate fast path. On<br>
@@ -549,6 +552,11 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,<br>
if (map == MAP_FAILED)<br>
return vk_errorf(pool->device->instance, pool->device,<br>
VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");<br>
+<br>
+ /* Now that we mapped the new memory, we can write the new<br>
+ * center_bo_offset back into pool and update pool->map. */<br>
+ pool->center_bo_offset = center_bo_offset;<br>
+ pool->map = map + center_bo_offset;<br>
gem_handle = anv_gem_userptr(pool->device, map, size);<br>
if (gem_handle == 0) {<br>
munmap(map, size);<br>
@@ -573,10 +581,6 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,<br>
if (!pool->device->info.has_llc)<br>
anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED);<br>
<br>
- /* Now that we successfull allocated everything, we can write the new<br>
- * center_bo_offset back into pool. */<br>
- pool->center_bo_offset = center_bo_offset;<br>
-<br>
/* For block pool BOs we have to be a bit careful about where we place them<br>
* in the GTT. There are two documented workarounds for state base address<br>
* placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset<br>
@@ -670,8 +674,12 @@ anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)<br>
void*<br>
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)<br>
{<br>
- struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);<br>
- return bo->map + pool->center_bo_offset + offset;<br>
+ if (pool->bo_flags & EXEC_OBJECT_PINNED) {<br>
+ struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);<br>
+ return bo->map + offset;<br>
+ } else {<br>
+ return pool->map + offset;<br>
+ }<br>
}<br>
<br>
/** Grows and re-centers the block pool.<br>
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h<br>
index 3889065c93c..110b2ccf023 100644<br>
--- a/src/intel/vulkan/anv_private.h<br>
+++ b/src/intel/vulkan/anv_private.h<br>
@@ -663,6 +663,19 @@ struct anv_block_pool {<br>
*/<br>
uint32_t center_bo_offset;<br>
<br>
+ /* Current memory map of the block pool. This pointer may or may not<br>
+ * point to the actual beginning of the block pool memory. If<br>
+ * anv_block_pool_alloc_back has ever been called, then this pointer<br>
+ * will point to the "center" position of the buffer and all offsets<br>
+ * (negative or positive) given out by the block pool alloc functions<br>
+ * will be valid relative to this pointer.<br>
+ *<br>
+ * In particular, map == bo.map + center_offset<br>
+ *<br>
+ * DO NOT access this pointer directly. Use anv_block_pool_map() instead,<br>
+ * since it will handle the softpin case as well, where this pointer is NULL.<br>
+ */<br>
+ void *map;<br>
int fd;<br>
<br>
/**<br>
-- <br>
2.17.2<br>
<br>
</blockquote></div>