[Intel-gfx] [PATCH 04/43] drm/i915: Pin backing pages for pwrite
Chris Wilson
chris@chris-wilson.co.uk
Fri May 25 14:32:43 CEST 2012
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 44 ++++++++++++++++++---------------------
1 file changed, 20 insertions(+), 24 deletions(-)
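Notes (below the cut, not part of the commit message):

The patch pins the object's backing pages once via i915_gem_object_get_pages_gtt() before the pwrite copy loop, takes an extra reference on the page being written so it stays valid while struct_mutex is dropped around the faulting slow path, and re-acquires the pages after relocking since they may have been reaped in the meantime. The stand-alone user-space C sketch below only illustrates that pin/reference/drop-lock pattern under those assumptions; every identifier in it (fake_page, fake_object, pin_all_pages, page_get, page_put, object_pwrite) is invented for the illustration and is not i915 code.

/*
 * Illustration only, not driver code: pin a set of refcounted "pages"
 * up front, then hold a per-page reference while the lock protecting
 * the set is dropped for a slow, possibly-faulting copy.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE 64

struct fake_page {
	int refcount;
	char data[FAKE_PAGE_SIZE];
};

struct fake_object {
	pthread_mutex_t lock;		/* stands in for dev->struct_mutex */
	struct fake_page **pages;	/* NULL until pinned */
	int npages;
};

/* Analogue of i915_gem_object_get_pages_gtt(): make obj->pages valid. */
static int pin_all_pages(struct fake_object *obj)
{
	int i;

	if (obj->pages)
		return 0;

	obj->pages = calloc(obj->npages, sizeof(*obj->pages));
	if (!obj->pages)
		return -1;

	for (i = 0; i < obj->npages; i++) {
		obj->pages[i] = calloc(1, sizeof(struct fake_page));
		if (!obj->pages[i])
			return -1;
		obj->pages[i]->refcount = 1;
	}
	return 0;
}

static void page_get(struct fake_page *p) { p->refcount++; }
static void page_put(struct fake_page *p) { if (--p->refcount == 0) free(p); }

/* Analogue of the pwrite loop: pin pages up front, reference the page
 * being written across the unlocked copy, re-pin after relocking. */
static int object_pwrite(struct fake_object *obj, const char *src, size_t len)
{
	size_t done = 0;
	int ret;

	pthread_mutex_lock(&obj->lock);
	ret = pin_all_pages(obj);		/* pin backing pages up front */
	if (ret)
		goto out;

	while (done < len) {
		struct fake_page *page = obj->pages[done / FAKE_PAGE_SIZE];
		size_t chunk = len - done < FAKE_PAGE_SIZE ?
			       len - done : FAKE_PAGE_SIZE;

		page_get(page);			/* keep page across unlock */
		pthread_mutex_unlock(&obj->lock);

		memcpy(page->data, src + done, chunk);	/* "slow path" copy */

		pthread_mutex_lock(&obj->lock);
		ret = pin_all_pages(obj);	/* pages may have been reaped */
		page_put(page);
		if (ret)
			goto out;

		done += chunk;
	}
out:
	pthread_mutex_unlock(&obj->lock);
	return ret;
}

int main(void)
{
	struct fake_object obj = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.npages = 4,
	};
	char buf[4 * FAKE_PAGE_SIZE];

	memset(buf, 0xaa, sizeof(buf));
	return object_pwrite(&obj, buf, sizeof(buf)) ? 1 : 0;
}

The re-pin after relocking in the sketch mirrors the hunk below that calls i915_gem_object_get_pages_gtt() again once the slow path has succeeded, since dropping struct_mutex means the backing pages may no longer be resident.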
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 578b38a..597a1ff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,6 +60,8 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
@@ -673,7 +675,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -707,7 +709,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -716,7 +718,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -725,7 +726,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -751,6 +751,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages_gtt(obj);
+ if (ret)
+ return ret;
+
offset = args->offset;
obj->dirty = 1;
@@ -776,17 +780,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
+ page = obj->pages[offset >> PAGE_SHIFT];
+ page_cache_get(page);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -798,27 +793,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret == 0)
goto next_page;
- hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+ hit_slowpath = 1;
+
+ if (ret == 0)
+ ret = i915_gem_object_get_pages_gtt(obj);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
+ page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -1525,6 +1518,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->pages)
return 0;
+ if (obj->madv != I915_MADV_WILLNEED)
+ return -EINVAL;
+
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
--
1.7.10