[Intel-gfx] [PATCH v5 3/7] drm/i915: introduce and use i915_gem_object_vmap_range()
Dave Gordon
david.s.gordon at intel.com
Mon Feb 29 11:13:10 UTC 2016
From: Alex Dai <yu.dai at intel.com>
There are several places inside the driver where a GEM object is mapped
into kernel virtual space. The mapping may cover either the whole object
or only a subset of its pages.
This patch introduces a function, i915_gem_object_vmap_range(), to provide
the common functionality. The code itself is extracted and adapted from
vmap_batch(), but the new function also replaces vmap_obj() and the
open-coded version in i915_gem_dmabuf_vmap().
v2: Use obj->pages->nents to bound the iteration within
i915_gem_object_vmap; break out once all the requested pages have been
gathered. The caller must pass the actual page count required. [Tvrtko Ursulin]
v4: Renamed to i915_gem_object_vmap_range() to make its purpose
clearer. [Dave Gordon]
v5: Use Chris Wilson's new drm_malloc_gfp() rather than kmalloc() or
drm_malloc_ab(). [Dave Gordon]
Signed-off-by: Alex Dai <yu.dai at intel.com>
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
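[Not part of the patch: a minimal usage sketch of the new helper, assuming
struct_mutex is held where required and that the object's backing pages are
gathered and pinned by the caller, as the kerneldoc requires. The wrapper
name example_map_first_pages() and its error-handling choices are
illustrative only; the other helpers already exist in i915_drv.h.]

#include <linux/err.h>
#include "i915_drv.h"

static void *example_map_first_pages(struct drm_i915_gem_object *obj,
				     unsigned int npages)
{
	void *vaddr;
	int ret;

	/* Gather the backing pages and pin them before mapping. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);
	i915_gem_object_pin_pages(obj);

	/* Map the first 'npages' pages; the helper returns NULL on failure. */
	vaddr = i915_gem_object_vmap_range(obj, 0, npages);
	if (vaddr == NULL) {
		i915_gem_object_unpin_pages(obj);
		return ERR_PTR(-ENOMEM);
	}

	/* Caller is responsible for vunmap(vaddr) and unpinning when done. */
	return vaddr;
}

[vunmap() is the matching teardown, as in the dma-buf and ringbuffer
callers converted below.]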
drivers/gpu/drm/i915/i915_cmd_parser.c | 28 +-------------------
drivers/gpu/drm/i915/i915_drv.h | 4 +++
drivers/gpu/drm/i915/i915_gem.c | 47 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 16 +++--------
drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++---------------
5 files changed, 57 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..114b55f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -863,37 +863,11 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
unsigned start, unsigned len)
{
- int i;
- void *addr = NULL;
- struct sg_page_iter sg_iter;
int first_page = start >> PAGE_SHIFT;
int last_page = (len + start + 4095) >> PAGE_SHIFT;
int npages = last_page - first_page;
- struct page **pages;
-
- pages = drm_malloc_ab(npages, sizeof(*pages));
- if (pages == NULL) {
- DRM_DEBUG_DRIVER("Failed to get space for pages\n");
- goto finish;
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
- pages[i++] = sg_page_iter_page(&sg_iter);
- if (i == npages)
- break;
- }
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- if (addr == NULL) {
- DRM_DEBUG_DRIVER("Failed to vmap pages\n");
- goto finish;
- }
-finish:
- if (pages)
- drm_free_large(pages);
- return (u32*)addr;
+ return i915_gem_object_vmap_range(obj, first_page, npages);
}
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9e76bfc..918b0bb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2983,6 +2983,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
obj->pages_pin_count--;
}
+void *__must_check i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+ unsigned int first,
+ unsigned int npages);
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f346..c621b3e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2400,6 +2400,53 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
}
+/**
+ * i915_gem_object_vmap_range - map some or all of a GEM object into kernel space
+ * @obj: the GEM object to be mapped
+ * @first: index of the first page of the object to be mapped
+ * @npages: length of the mapping to be created, in pages
+ *
+ * Map a given page range of a GEM object into kernel virtual space. The caller
+ * must make sure the associated pages are gathered and pinned before calling
+ * this function, and is responsible for vunmapping the returned address.
+ *
+ * Returns the address at which the object has been mapped, or NULL on failure.
+ */
+void *i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+ unsigned int first,
+ unsigned int npages)
+{
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+ void *addr;
+ int i;
+
+ if (first + npages > obj->pages->nents) {
+ DRM_DEBUG_DRIVER("Invalid page count\n");
+ return NULL;
+ }
+
+ pages = drm_malloc_gfp(npages, sizeof(*pages), GFP_TEMPORARY);
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ return NULL;
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first) {
+ pages[i++] = sg_page_iter_page(&sg_iter);
+ if (i == npages)
+ break;
+ }
+
+ addr = vmap(pages, npages, 0, PAGE_KERNEL);
+ if (addr == NULL)
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+ drm_free_large(pages);
+
+ return addr;
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6..aee4149 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -110,9 +110,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- struct sg_page_iter sg_iter;
- struct page **pages;
- int ret, i;
+ int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -131,16 +129,8 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = -ENOMEM;
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- goto err_unpin;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
+ obj->dma_buf_vmapping = i915_gem_object_vmap_range(obj, 0,
+ dma_buf->size >> PAGE_SHIFT);
if (!obj->dma_buf_vmapping)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..434a452 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- struct page **pages;
- void *addr;
- int i;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- return NULL;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- return addr;
-}
-
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
@@ -2103,7 +2082,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
}
- ringbuf->virtual_start = vmap_obj(obj);
+ ringbuf->virtual_start = i915_gem_object_vmap_range(obj, 0,
+ ringbuf->size >> PAGE_SHIFT);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -ENOMEM;
--
1.9.1