[Intel-gfx] [PATCH v6 3/7] drm/i915: introduce and use i915_gem_object_vmap_range()
Dave Gordon
david.s.gordon at intel.com
Mon Feb 29 21:16:08 UTC 2016
From: Alex Dai <yu.dai at intel.com>
There are several places inside the driver where a GEM object is mapped
into kernel virtual space. The mapping may cover either the whole object
or only a subset of it.
This patch introduces a function, i915_gem_object_vmap_range(), that
implements the common functionality. The code itself is extracted and
adapted from that in vmap_batch(); the new function also replaces
vmap_obj() and the open-coded version in i915_gem_dmabuf_vmap().
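As a quick illustration of the intended calling convention (not part of
the patch itself): the caller gathers and pins the object's backing
pages, maps a byte range, and later vunmap()s and unpins. map_for_cpu()
below is a made-up example caller; only i915_gem_object_vmap_range() and
the existing get/pin/unpin page helpers are real:

	/* Hypothetical caller, for illustration only. */
	static void *map_for_cpu(struct drm_i915_gem_object *obj,
				 unsigned long start, unsigned long nbytes)
	{
		void *vaddr;
		int ret;

		ret = i915_gem_object_get_pages(obj);	/* gather backing pages */
		if (ret)
			return NULL;

		i915_gem_object_pin_pages(obj);		/* keep them resident */

		vaddr = i915_gem_object_vmap_range(obj, start, nbytes);
		if (vaddr == NULL)
			i915_gem_object_unpin_pages(obj);

		/* caller must vunmap(vaddr) and unpin when finished */
		return vaddr;
	}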
v2: use obj->pages->nents for iteration within i915_gem_object_vmap;
break when it finishes all desired pages. The caller must pass the
actual page count required. [Tvrtko Ursulin]
v4: renamed to i915_gem_object_vmap_range() to make its function
clearer. [Dave Gordon]
v5: use Chris Wilson's new drm_malloc_gfp() rather than kmalloc() or
drm_malloc_ab(). [Dave Gordon]
v6: changed range checking to not use pages->nents. [Tvrtko Ursulin]
Use sg_nents_for_len() for range check instead. [Dave Gordon]
Pass range parameters in bytes rather than pages (both callers
were converting from bytes to pages anyway, so this reduces the
number of places where the conversion is done).
With this change, we have only one vmap() in the whole driver :)
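To make the byte-based interface concrete, here is a rough worked
example (made-up numbers, assuming 4 KiB pages) of how the helper widens
the requested byte range to whole pages internally:

	/*
	 * start = 5000, nbytes = 4000   =>  byte range [5000, 9000)
	 *
	 * first  = start >> PAGE_SHIFT;                        = 1
	 * npages = DIV_ROUND_UP(start + nbytes, PAGE_SIZE) - first;
	 *        = DIV_ROUND_UP(9000, 4096) - 1 = 3 - 1        = 2
	 *
	 * Pages 1 and 2 (bytes 4096..12287) get vmap()ed; the requested
	 * offset then lives at vaddr + (start & ~PAGE_MASK), i.e. at
	 * vaddr + 904 here.
	 */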
Signed-off-by: Alex Dai <yu.dai at intel.com>
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_cmd_parser.c | 32 +-----------------
drivers/gpu/drm/i915/i915_drv.h | 4 +++
drivers/gpu/drm/i915/i915_gem.c | 58 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 16 ++-------
drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++------------
5 files changed, 68 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..1b2515d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -863,37 +863,7 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
unsigned start, unsigned len)
{
- int i;
- void *addr = NULL;
- struct sg_page_iter sg_iter;
- int first_page = start >> PAGE_SHIFT;
- int last_page = (len + start + 4095) >> PAGE_SHIFT;
- int npages = last_page - first_page;
- struct page **pages;
-
- pages = drm_malloc_ab(npages, sizeof(*pages));
- if (pages == NULL) {
- DRM_DEBUG_DRIVER("Failed to get space for pages\n");
- goto finish;
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
- pages[i++] = sg_page_iter_page(&sg_iter);
- if (i == npages)
- break;
- }
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- if (addr == NULL) {
- DRM_DEBUG_DRIVER("Failed to vmap pages\n");
- goto finish;
- }
-
-finish:
- if (pages)
- drm_free_large(pages);
- return (u32*)addr;
+ return i915_gem_object_vmap_range(obj, start, len);
}
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a4dcb74..12b0717 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2983,6 +2983,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
obj->pages_pin_count--;
}
+void *__must_check i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+ unsigned long start,
+ unsigned long nbytes);
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3a..8f12e73 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2400,6 +2400,64 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
}
+/**
+ * i915_gem_object_vmap_range - map some or all of a GEM object into kernel space
+ * @obj: the GEM object to be mapped
+ * @start: offset in bytes of the start of the range to be mapped
+ * @nbytes: length in bytes of the range to be mapped
+ *
+ * Map a given range of a GEM object into kernel virtual space. The range will
+ * be extended at both ends, if necessary, to span a whole number of pages. The
+ * caller must make sure the associated pages are gathered and pinned before
+ * calling this function, and is responsible for unmapping the returned address
+ * when it is no longer required.
+ *
+ * Returns the kernel address of the mapped range, or NULL on failure.
+ */
+void *i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+ unsigned long start,
+ unsigned long nbytes)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+ unsigned long first, npages, i;
+ int nents;
+ void *addr;
+
+ /* Check requested range against underlying sg list */
+ nents = sg_nents_for_len(sg, start + nbytes);
+ if (nents < 0) {
+ DRM_DEBUG_DRIVER("Invalid page count\n");
+ return NULL;
+ }
+
+ /* Work in pages from now on */
+ first = start >> PAGE_SHIFT;
+ npages = DIV_ROUND_UP(start + nbytes, PAGE_SIZE) - first;
+
+ pages = drm_malloc_gfp(npages, sizeof(*pages), GFP_TEMPORARY);
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ return NULL;
+ }
+
+ i = 0;
+ for_each_sg_page(sg, &sg_iter, nents, first) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ if (++i >= npages)
+ break;
+ }
+ WARN_ON(i != npages);
+
+ addr = vmap(pages, npages, 0, PAGE_KERNEL);
+ if (addr == NULL)
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+ drm_free_large(pages);
+
+ return addr;
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6..aee4149 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -110,9 +110,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- struct sg_page_iter sg_iter;
- struct page **pages;
- int ret, i;
+ int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -131,16 +129,8 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = -ENOMEM;
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- goto err_unpin;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
+ obj->dma_buf_vmapping = i915_gem_object_vmap_range(obj, 0,
+ dma_buf->size);
if (!obj->dma_buf_vmapping)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..434a452 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- struct page **pages;
- void *addr;
- int i;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- return NULL;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- return addr;
-}
-
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
@@ -2103,7 +2082,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
}
- ringbuf->virtual_start = vmap_obj(obj);
+ ringbuf->virtual_start = i915_gem_object_vmap_range(obj, 0,
+ ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -ENOMEM;
--
1.9.1