[Intel-gfx] [PATCH 36/37] drm/i915: iomem fixes for GEM interface
Chris Wilson
chris at chris-wilson.co.uk
Wed Mar 10 23:45:23 CET 2010
In particular, note that the pages we copy from in
slow_kernel_write() are not in fact __user pages, nor can they fault.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 62 ++++++++++++++++----------------------
1 files changed, 26 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1dc352b..a676646 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -144,7 +144,7 @@ fast_shmem_read(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
+ void *vaddr;
int unwritten;
vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
@@ -175,7 +175,7 @@ slow_shmem_copy(struct page *dst_page,
int src_offset,
int length)
{
- char *dst_vaddr, *src_vaddr;
+ void *dst_vaddr, *src_vaddr;
dst_vaddr = kmap_atomic(dst_page, KM_USER0);
if (dst_vaddr == NULL)
@@ -203,7 +203,7 @@ slow_shmem_bit17_copy(struct page *gpu_page,
int length,
int is_read)
{
- char *gpu_vaddr, *cpu_vaddr;
+ void *gpu_vaddr, *cpu_vaddr;
/* Use the unswizzled path if this page isn't affected. */
if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
@@ -509,12 +509,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
*/
static inline int
-fast_user_write(struct io_mapping *mapping,
+fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
@@ -530,25 +530,22 @@ fast_user_write(struct io_mapping *mapping,
* page faults
*/
-static inline int
-slow_kernel_write(struct io_mapping *mapping,
+static inline void
+slow_kernel_write(struct io_mapping *mapping,
loff_t gtt_base, int gtt_offset,
struct page *user_page, int user_offset,
int length)
{
- char *src_vaddr, *dst_vaddr;
- unsigned long unwritten;
-
- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
- src_vaddr = kmap_atomic(user_page, KM_USER1);
- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
- kunmap_atomic(src_vaddr, KM_USER1);
- io_mapping_unmap_atomic(dst_vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ void __iomem *dst_vaddr;
+ void *src_vaddr;
+
+ dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+ src_vaddr = kmap(user_page);
+ memcpy_toio(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+ kunmap(user_page);
+ io_mapping_unmap(dst_vaddr);
}
static inline int
@@ -557,7 +554,7 @@ fast_shmem_write(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
+ void *vaddr;
unsigned long unwritten;
vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
@@ -721,18 +718,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- goto out_unpin_object;
+ slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
remain -= page_length;
offset += page_length;
@@ -3276,7 +3266,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
- void __iomem *reloc_page;
bool need_fence;
need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -3321,7 +3310,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_obj_priv;
uint32_t reloc_val, reloc_offset;
- uint32_t __iomem *reloc_entry;
+ u32 __iomem *reloc_entry;
+ void __iomem *reloc_page;
target_obj = drm_gem_object_lookup(obj->dev, file_priv,
reloc->target_handle);
@@ -3451,8 +3441,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
~(PAGE_SIZE - 1)));
- reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_entry = (u32 __iomem *)(reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc->delta;
#if WATCH_BUF
--
1.7.0
More information about the Intel-gfx
mailing list