[Intel-gfx] [PATCH 133/190] drm/i915: Convert known clflush paths over to clflush_cache_range()
Chris Wilson
chris at chris-wilson.co.uk
Mon Jan 11 02:45:17 PST 2016
A step towards removing redundant functions from the kernel: in this
case both drm and arch/x86 define a clflush(addr, range) operation. The
difference is that drm_clflush_virt_range() provides a wbinvd()
fallback, but along most paths, we only clflush when we know we can.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 13 +++++--------
drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +-
drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++--
3 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c3d43921bc98..d81821c6f9a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -614,8 +614,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap_atomic(page);
if (needs_clflush)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
+ clflush_cache_range(vaddr + shmem_page_offset, page_length);
ret = __copy_to_user_inatomic(user_data,
vaddr + shmem_page_offset,
page_length);
@@ -639,9 +638,9 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
start = round_down(start, 128);
end = round_up(end, 128);
- drm_clflush_virt_range((void *)start, end - start);
+ clflush_cache_range((void *)start, end - start);
} else {
- drm_clflush_virt_range(addr, length);
+ clflush_cache_range(addr, length);
}
}
@@ -934,13 +933,11 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap_atomic(page);
if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
+ clflush_cache_range(vaddr + shmem_page_offset, page_length);
ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
user_data, page_length);
if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
+ clflush_cache_range(vaddr + shmem_page_offset, page_length);
kunmap_atomic(vaddr);
return ret ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0aadfaee2150..b8af904ad12c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -357,7 +357,7 @@ static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 894eb8089296..a66213b2450e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -395,8 +395,8 @@ intel_engine_sync_index(struct intel_engine_cs *ring,
static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
- drm_clflush_virt_range(&ring->status_page.page_addr[reg],
- sizeof(uint32_t));
+ clflush_cache_range(&ring->status_page.page_addr[reg],
+ sizeof(uint32_t));
}
static inline u32
--
2.7.0.rc3
More information about the Intel-gfx
mailing list