[Intel-gfx] [PATCH] drm/i915/pages: some more unsigned long conversions
Matthew Auld
matthew.auld at intel.com
Fri Apr 17 09:30:46 UTC 2020
unsigned long is always preferred when indexing pages, especially when the
caller already computes the index as an unsigned long and an unsigned int
parameter would silently truncate it.
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.h | 6 +++---
drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 11 ++++++-----
3 files changed, 10 insertions(+), 9 deletions(-)
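
As a side note, here is a minimal userspace-style sketch of the hazard this
avoids. None of this is i915 code; the helper names and the 32TiB offset are
made up purely for illustration, and it assumes a 64-bit (LP64) build where
unsigned long is 64 bits wide:

/* Illustrative only -- not i915 code. Shows how a page index computed
 * from a 64-bit byte offset is truncated by an unsigned int parameter.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long index_via_ulong(unsigned long n) { return n; }
static unsigned long index_via_uint(unsigned int n)   { return n; }

int main(void)
{
	/* hypothetical offset 32TiB into a (huge) object; assumes LP64 */
	unsigned long offset = 1UL << 45;
	unsigned long n = offset >> PAGE_SHIFT;	/* page index 1 << 33 */

	printf("caller passes %lu, unsigned int callee sees %lu\n",
	       index_via_ulong(n), index_via_uint(n));

	return 0;
}

On an LP64 build the unsigned long path prints the real index (1 << 33) while
the unsigned int path prints 0, since the top 32 bits are dropped at the call
boundary.
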
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2faa481cc18f..49f88a3409b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -252,15 +252,15 @@ int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
+ unsigned long n, unsigned int *offset);
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+ unsigned long n);
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+ unsigned long n);
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 54ee658bb168..0399940fff94 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -236,7 +236,7 @@ struct drm_i915_gem_object {
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
+ unsigned long sg_idx; /* in pages */
struct radix_tree_root radix;
struct mutex lock; /* protects this cache */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 5d855fcd5c0f..cb215a0d7efb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -409,12 +409,13 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
+ unsigned long n,
unsigned int *offset)
{
struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
struct scatterlist *sg;
- unsigned int idx, count;
+ unsigned long idx;
+ unsigned int count;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
@@ -445,7 +446,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
while (idx + count <= n) {
void *entry;
- unsigned long i;
+ unsigned int i;
int ret;
/* If we cannot allocate and insert this entry, or the
@@ -521,7 +522,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
}
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned long n)
{
struct scatterlist *sg;
unsigned int offset;
@@ -535,7 +536,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
+ unsigned long n)
{
struct page *page;
--
2.20.1