[Intel-gfx] [PATCH 10/28] drm/mm: Pack allocated/scanned boolean into a bitfield
Chris Wilson
chris at chris-wilson.co.uk
Mon Aug 26 07:21:31 UTC 2019
The ulterior motive for switching the booleans over to bitops is to
allow use of the allocated flag as a bitlock.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
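Not part of the patch, but a minimal sketch of where the bitlock idea could go,
assuming a follow-up builds on the locked bitops from <linux/bitops.h>:
test_and_set_bit_lock() would claim the node with acquire semantics, and
clear_bit_unlock() (which drm_mm_remove_node() already switches to below)
provides the matching release. The helper names drm_mm_node_try_claim() and
drm_mm_node_release() are hypothetical, purely for illustration:

	#include <linux/bitops.h>
	#include <drm/drm_mm.h>

	/*
	 * Hypothetical helpers: treat DRM_MM_NODE_ALLOCATED_BIT as a trylock.
	 * Acquire pairs with the clear_bit_unlock() release on removal.
	 */
	static inline bool drm_mm_node_try_claim(struct drm_mm_node *node)
	{
		/* Returns true if we set the bit, i.e. the node was unclaimed. */
		return !test_and_set_bit_lock(DRM_MM_NODE_ALLOCATED_BIT,
					      &node->flags);
	}

	static inline void drm_mm_node_release(struct drm_mm_node *node)
	{
		/* Clear with release semantics, publishing prior stores. */
		clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	}

Note how the patch itself uses the non-atomic __set_bit()/__clear_bit() where
the node is already serialised by the caller, and reserves clear_bit_unlock()
for the removal path, which is the release half such a lock bit would pair with.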
drivers/gpu/drm/drm_mm.c | 36 +++++++++++--------
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 6 ++--
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 2 +-
drivers/gpu/drm/i915/i915_gem.c | 16 ++++-----
drivers/gpu/drm/i915/i915_gem_evict.c | 2 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +-
drivers/gpu/drm/i915/i915_vma.c | 4 +--
drivers/gpu/drm/i915/i915_vma.h | 2 +-
drivers/gpu/drm/selftests/test-drm_mm.c | 14 ++++----
include/drm/drm_mm.h | 7 ++--
10 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 4581c5387372..211967006cec 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
@@ -605,7 +611,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
- DRM_MM_BUG_ON(!old->allocated);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
@@ -622,8 +628,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -731,9 +736,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -818,8 +823,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -837,6 +841,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
list_next_entry(node, node_list));
list_add(&node->node_list, &prev_node->node_list);
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+
return (node->start + node->size > scan->hit_start &&
node->start < scan->hit_end);
}
@@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b5f6937369ea..493f07806b08 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -902,7 +902,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
+ cache->node.flags = 0;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -963,7 +963,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
@@ -1056,7 +1056,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
offset = cache->node.start;
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 296a82603be0..07fc6f28abcd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -401,7 +401,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
{
struct drm_mm_node *node = &ggtt->uc_fw;
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95e7c52cf8ed..814f62fca727 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -351,12 +351,12 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_unlock;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -561,12 +561,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 52c86c6e0673..7abcac3b5e2e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -301,7 +301,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86979f391dd0..b001d8b0f787 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2552,7 +2552,7 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
*start += I915_GTT_PAGE_SIZE;
/* Also leave a space between the unallocated reserved node after the
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ddb03cbcbf60..49fe4a664d29 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -482,7 +482,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
- return node->allocated && node->color != color;
+ return drm_mm_node_allocated(node) && node->color != color;
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
@@ -800,7 +800,7 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->fence);
mutex_lock(&vma->vm->mutex);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 889fc7cb910a..af2ef0a51455 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -214,7 +214,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(upper_32_bits(vma->node.start));
GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
return lower_32_bits(vma->node.start);
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 388f9844f4ba..9aabe82dcd3a 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node before start: node=%llx+%llu, start=%llx\n",
node->start, node->size, start);
return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node after end: node=%llx+%llu, end=%llx\n",
node->start, node->size, end);
return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
struct drm_mm_node *next = list_next_entry(hole, node_list);
const char *node1 = NULL, *node2 = NULL;
- if (hole->allocated)
+ if (drm_mm_node_allocated(hole))
node1 = kasprintf(GFP_KERNEL,
"[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
- if (next->allocated)
+ if (drm_mm_node_allocated(next))
node2 = kasprintf(GFP_KERNEL,
", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
++*start;
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
--*end;
}
static bool colors_abutt(const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
- list_next_entry(node, node_list)->allocated) {
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 2c3bbb43c7d1..d7939c054259 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -168,8 +168,9 @@ struct drm_mm_node {
struct rb_node rb_hole_addr;
u64 __subtree_last;
u64 hole_size;
- bool allocated : 1;
- bool scanned_block : 1;
+ unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT 0
+#define DRM_MM_NODE_SCANNED_BIT 1
#ifdef CONFIG_DRM_DEBUG_MM
depot_stack_handle_t stack;
#endif
@@ -253,7 +254,7 @@ struct drm_mm_scan {
*/
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
- return node->allocated;
+ return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
/**
--
2.23.0