[Intel-gfx] [RFC PATCH 04/42] drm/i915: introduce intel_memory_region

Chris Wilson chris at chris-wilson.co.uk
Thu Feb 14 15:16:45 UTC 2019


Quoting Matthew Auld (2019-02-14 14:57:02)
> +int
> +i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
> +{
> +       struct intel_memory_region *mem = obj->memory_region;
> +       resource_size_t size = obj->base.size;
> +       struct sg_table *st;
> +       struct scatterlist *sg;
> +       unsigned int sg_page_sizes;
> +       unsigned long n_pages;
> +
> +       GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.min_size));
> +       GEM_BUG_ON(!list_empty(&obj->blocks));
> +
> +       st = kmalloc(sizeof(*st), GFP_KERNEL);
> +       if (!st)
> +               return -ENOMEM;
> +
> +       n_pages = div64_u64(size, mem->mm.min_size);

min_size is a power of two.

n_pages = size >> ilog2(min_size);

would suffice. Keeping min_order would come in handy later.

> +
> +       if (sg_alloc_table(st, n_pages, GFP_KERNEL)) {
> +               kfree(st);
> +               return -ENOMEM;
> +       }
> +
> +       sg = st->sgl;
> +       st->nents = 0;
> +       sg_page_sizes = 0;
> +
> +       mutex_lock(&mem->mm_lock);
> +
> +       do {
> +               struct i915_gem_buddy_block *block;
> +               unsigned int order;
> +               u64 block_size;
> +               u64 offset;
> +
> +               order = fls(n_pages) - 1;
> +               GEM_BUG_ON(order > mem->mm.max_order);
> +
> +               do {
> +                       block = i915_gem_buddy_alloc(&mem->mm, order);
> +                       if (!IS_ERR(block))
> +                               break;
> +
> +                       /* XXX: some kind of eviction pass, local to the device */
> +                       if (!order--)
> +                               goto err_free_blocks;
> +               } while (1);
> +
> +               n_pages -= 1 << order;

BIT(order) so you don't have sign extension fun. Need to fix
i915_gem_internal.c!

> +struct intel_memory_region_ops {
> +       unsigned int flags;
> +
> +       int (*init)(struct intel_memory_region *);
> +       void (*release)(struct intel_memory_region *);
> +
> +       struct drm_i915_gem_object *
> +       (*object_create)(struct intel_memory_region *,
> +                        resource_size_t,
> +                        unsigned int);

create_object()

ops is acting as a factory here, and we are not operating on the object
itself.

> +static int igt_mock_fill(void *arg)
> +{
> +       struct intel_memory_region *mem = arg;
> +       resource_size_t total = resource_size(&mem->region);
> +       resource_size_t page_size;
> +       resource_size_t rem;
> +       unsigned long max_pages;
> +       unsigned long page_num;
> +       LIST_HEAD(objects);
> +       int err = 0;
> +
> +       page_size = mem->mm.min_size;
> +       max_pages = total / page_size;

Hmm, 32b? Can resource_size_t be 64b on a 32b system?

It must be to accommodate PAE.

> +static struct drm_i915_gem_object *
> +mock_object_create(struct intel_memory_region *mem,
> +                  resource_size_t size,
> +                  unsigned int flags)
> +{
> +       struct drm_i915_private *i915 = mem->i915;
> +       struct drm_i915_gem_object *obj;
> +
> +       if (size > BIT(mem->mm.max_order) * mem->mm.min_size)

A mix of 64b and 32b types.

if (size >> (mem->mm.max_order + mem->mm.min_order))

> +               return ERR_PTR(-E2BIG);

GEM_BUG_ON(overflows_type(size, obj->base.size));

> +       obj = i915_gem_object_alloc(i915);
> +       if (!obj)
> +               return ERR_PTR(-ENOMEM);
> +
> +       drm_gem_private_object_init(&i915->drm, &obj->base, size);
> +       i915_gem_object_init(obj, &mock_region_obj_ops);
> +
> +       obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
> +       obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;

i915_gem_object_set_cache_coherency() ?
-Chris


More information about the Intel-gfx mailing list