[Intel-gfx] [PATCH 24/45] drm/i915: Split GEM object type definition to its own header

Joonas Lahtinen joonas.lahtinen@linux.intel.com
Mon Apr 29 09:36:30 UTC 2019


Quoting Jani Nikula (2019-04-26 15:12:49)
> On Thu, 25 Apr 2019, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > For convenience in avoiding inline spaghetti, keep the type definition
> > as a separate header.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Reviewed-by: Matthew Auld <matthew.auld@intel.com>
> > ---
> >  drivers/gpu/drm/i915/Makefile                 |   1 +
> >  drivers/gpu/drm/i915/gem/Makefile             |   1 +
> >  drivers/gpu/drm/i915/gem/Makefile.header-test |  16 +
> >  .../gpu/drm/i915/gem/i915_gem_object_types.h  | 285 +++++++++++++++++
> >  drivers/gpu/drm/i915/gt/intel_engine_types.h  |   1 +
> >  drivers/gpu/drm/i915/i915_drv.h               |   3 +-
> >  drivers/gpu/drm/i915/i915_gem_batch_pool.h    |   3 +-
> >  drivers/gpu/drm/i915/i915_gem_gtt.h           |   1 +
> >  drivers/gpu/drm/i915/i915_gem_object.h        | 295 +-----------------
> >  9 files changed, 312 insertions(+), 294 deletions(-)
> >  create mode 100644 drivers/gpu/drm/i915/gem/Makefile
> >  create mode 100644 drivers/gpu/drm/i915/gem/Makefile.header-test
> >  create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> >
> > diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> > index 58643373495c..def781c9ea69 100644
> > --- a/drivers/gpu/drm/i915/Makefile
> > +++ b/drivers/gpu/drm/i915/Makefile
> > @@ -85,6 +85,7 @@ gt-$(CONFIG_DRM_I915_SELFTEST) += \
> >  i915-y += $(gt-y)
> >  
> >  # GEM (Graphics Execution Management) code
> > +obj-y += gem/
> 
> I think this warrants similar treatment to gt/: posted standalone
> instead of hidden in a series, with explicit acks.
> 
> Provided the split makes sense to Joonas,
> 
> Acked-by: Jani Nikula <jani.nikula@intel.com>

Yeah, makes sense to me.

Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
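
(For readers following along: the pattern being acked here is to keep the
plain struct definitions in a standalone *_types.h header, so that headers
which only need the type for a pointer or an embedded member can include
the small types header, or just forward declare the struct, without
dragging in all the inline helpers. A minimal sketch of the idea, with
hypothetical file and struct names, not code from the patch:

    /* foo_types.h -- definitions only, minimal includes */
    #ifndef __FOO_TYPES_H__
    #define __FOO_TYPES_H__

    #include <linux/types.h>

    struct foo {
        u32 flags;
    };

    #endif

    /* foo.h -- inline API built on top of the types header */
    #ifndef __FOO_H__
    #define __FOO_H__

    #include "foo_types.h"

    static inline bool foo_is_ready(const struct foo *f)
    {
        return f->flags & 1;
    }

    #endif

This is what keeps the "inline spaghetti" out of headers such as
intel_engine_types.h below, which after this patch only needs a forward
declaration of struct drm_i915_gem_object.)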

> 
> 
> >  i915-y += \
> >         i915_active.o \
> >         i915_cmd_parser.o \
> > diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
> > new file mode 100644
> > index 000000000000..07e7b8b840ea
> > --- /dev/null
> > +++ b/drivers/gpu/drm/i915/gem/Makefile
> > @@ -0,0 +1 @@
> > +include $(src)/Makefile.header-test # Extra header tests
> > diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
> > new file mode 100644
> > index 000000000000..61e06cbb4b32
> > --- /dev/null
> > +++ b/drivers/gpu/drm/i915/gem/Makefile.header-test
> > @@ -0,0 +1,16 @@
> > +# SPDX-License-Identifier: MIT
> > +# Copyright © 2019 Intel Corporation
> > +
> > +# Test the headers are compilable as standalone units
> > +header_test := $(notdir $(wildcard $(src)/*.h))
> > +
> > +quiet_cmd_header_test = HDRTEST $@
> > +      cmd_header_test = echo "\#include \"$(<F)\"" > $@
> > +
> > +header_test_%.c: %.h
> > +     $(call cmd,header_test)
> > +
> > +extra-$(CONFIG_DRM_I915_WERROR) += \
> > +     $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
> > +
> > +clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
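
(Aside on how this header test works: for each header in gem/, kbuild
generates a one-line C file that does nothing but include it, then compiles
that file as an ordinary object when CONFIG_DRM_I915_WERROR is set. A
header that is not self-contained -- missing an include or a forward
declaration -- then fails the build. For example, the file generated by the
rule above for the new types header would be simply:

    /* header_test_i915_gem_object_types.c, generated by cmd_header_test */
    #include "i915_gem_object_types.h"

The clean-files line ensures the generated sources are removed on
make clean.)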
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> > new file mode 100644
> > index 000000000000..e4b50944f553
> > --- /dev/null
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> > @@ -0,0 +1,285 @@
> > +/*
> > + * SPDX-License-Identifier: MIT
> > + *
> > + * Copyright © 2016 Intel Corporation
> > + */
> > +
> > +#ifndef __I915_GEM_OBJECT_TYPES_H__
> > +#define __I915_GEM_OBJECT_TYPES_H__
> > +
> > +#include <linux/reservation.h>
> > +
> > +#include <drm/drm_gem.h>
> > +
> > +#include "../i915_active.h"
> > +#include "../i915_selftest.h"
> > +
> > +struct drm_i915_gem_object;
> > +
> > +/*
> > + * struct i915_lut_handle tracks the fast lookups from handle to vma used
> > + * for execbuf. Although we use a radixtree for that mapping, in order to
> > + * remove them as the object or context is closed, we need a secondary list
> > + * and a translation entry (i915_lut_handle).
> > + */
> > +struct i915_lut_handle {
> > +     struct list_head obj_link;
> > +     struct list_head ctx_link;
> > +     struct i915_gem_context *ctx;
> > +     u32 handle;
> > +};
> > +
> > +struct drm_i915_gem_object_ops {
> > +     unsigned int flags;
> > +#define I915_GEM_OBJECT_HAS_STRUCT_PAGE      BIT(0)
> > +#define I915_GEM_OBJECT_IS_SHRINKABLE        BIT(1)
> > +#define I915_GEM_OBJECT_IS_PROXY     BIT(2)
> > +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
> > +
> > +     /* Interface between the GEM object and its backing storage.
> > +      * get_pages() is called once prior to the use of the associated set
> > +      * of pages before they are bound into the GTT, and put_pages() is
> > +      * called after we no longer need them. As we expect there to be an
> > +      * associated cost with migrating pages between the backing storage
> > +      * and making them available for the GPU (e.g. clflush), we may hold
> > +      * onto the pages after they are no longer referenced by the GPU
> > +      * in case they may be used again shortly (for example migrating the
> > +      * pages to a different memory domain within the GTT). put_pages()
> > +      * will therefore most likely be called when the object itself is
> > +      * being released or under memory pressure (where we attempt to
> > +      * reap pages for the shrinker).
> > +      */
> > +     int (*get_pages)(struct drm_i915_gem_object *obj);
> > +     void (*put_pages)(struct drm_i915_gem_object *obj,
> > +                       struct sg_table *pages);
> > +
> > +     int (*pwrite)(struct drm_i915_gem_object *obj,
> > +                   const struct drm_i915_gem_pwrite *arg);
> > +
> > +     int (*dmabuf_export)(struct drm_i915_gem_object *obj);
> > +     void (*release)(struct drm_i915_gem_object *obj);
> > +};
> > +
> > +struct drm_i915_gem_object {
> > +     struct drm_gem_object base;
> > +
> > +     const struct drm_i915_gem_object_ops *ops;
> > +
> > +     struct {
> > +             /**
> > +              * @vma.lock: protect the list/tree of vmas
> > +              */
> > +             spinlock_t lock;
> > +
> > +             /**
> > +              * @vma.list: List of VMAs backed by this object
> > +              *
> > +              * The VMAs on this list are ordered by type: all GGTT VMAs are
> > +              * placed at the head and all ppGTT VMAs at the tail. The
> > +              * different types of GGTT VMAs are unordered between
> > +              * themselves; use @vma.tree (which has a defined order
> > +              * between all VMAs) to quickly find an exact match.
> > +              */
> > +             struct list_head list;
> > +
> > +             /**
> > +              * @vma.tree: Ordered tree of VMAs backed by this object
> > +              *
> > +              * All VMA created for this object are placed in the @vma.tree
> > +              * for fast retrieval via a binary search in
> > +              * i915_vma_instance(). They are also added to @vma.list for
> > +              * easy iteration.
> > +              */
> > +             struct rb_root tree;
> > +     } vma;
> > +
> > +     /**
> > +      * @lut_list: List of vma lookup entries in use for this object.
> > +      *
> > +      * If this object is closed, we need to remove all of its VMA from
> > +      * the fast lookup index in associated contexts; @lut_list provides
> > +      * this translation from object to context->handles_vma.
> > +      */
> > +     struct list_head lut_list;
> > +
> > +     /** Stolen memory for this object, instead of being backed by shmem. */
> > +     struct drm_mm_node *stolen;
> > +     union {
> > +             struct rcu_head rcu;
> > +             struct llist_node freed;
> > +     };
> > +
> > +     /**
> > +      * Whether the object is currently in the GGTT mmap.
> > +      */
> > +     unsigned int userfault_count;
> > +     struct list_head userfault_link;
> > +
> > +     struct list_head batch_pool_link;
> > +     I915_SELFTEST_DECLARE(struct list_head st_link);
> > +
> > +     unsigned long flags;
> > +
> > +     /**
> > +      * Have we taken a reference for the object for incomplete GPU
> > +      * activity?
> > +      */
> > +#define I915_BO_ACTIVE_REF 0
> > +
> > +     /*
> > +      * Is the object to be mapped as read-only to the GPU?
> > +      * Only honoured if the hardware has the relevant pte bit.
> > +      */
> > +     unsigned int cache_level:3;
> > +     unsigned int cache_coherent:2;
> > +#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
> > +#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
> > +     unsigned int cache_dirty:1;
> > +
> > +     /**
> > +      * @read_domains: Read memory domains.
> > +      *
> > +      * These monitor which caches contain read/write data related to the
> > +      * object. When transitioning from one set of domains to another,
> > +      * the driver is called to ensure that caches are suitably flushed and
> > +      * invalidated.
> > +      */
> > +     u16 read_domains;
> > +
> > +     /**
> > +      * @write_domain: Corresponding unique write memory domain.
> > +      */
> > +     u16 write_domain;
> > +
> > +     atomic_t frontbuffer_bits;
> > +     unsigned int frontbuffer_ggtt_origin; /* write once */
> > +     struct i915_active_request frontbuffer_write;
> > +
> > +     /** Current tiling stride for the object, if it's tiled. */
> > +     unsigned int tiling_and_stride;
> > +#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
> > +#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
> > +#define STRIDE_MASK (~TILING_MASK)
> > +
> > +     /** Count of VMA actually bound by this object */
> > +     unsigned int bind_count;
> > +     unsigned int active_count;
> > +     /** Count of how many global VMA are currently pinned for use by HW */
> > +     unsigned int pin_global;
> > +
> > +     struct {
> > +             struct mutex lock; /* protects the pages and their use */
> > +             atomic_t pages_pin_count;
> > +
> > +             struct sg_table *pages;
> > +             void *mapping;
> > +
> > +             /* TODO: whack some of this into the error state */
> > +             struct i915_page_sizes {
> > +                     /**
> > +                      * The sg mask of the pages sg_table, i.e. the mask
> > +                      * of the lengths for each sg entry.
> > +                      */
> > +                     unsigned int phys;
> > +
> > +                     /**
> > +                      * The gtt page sizes we are allowed to use given the
> > +                      * sg mask and the supported page sizes. This will
> > +                      * express the smallest unit we can use for the whole
> > +                      * object, as well as the larger sizes we may be able
> > +                      * to use opportunistically.
> > +                      */
> > +                     unsigned int sg;
> > +
> > +                     /**
> > +                      * The actual gtt page size usage. Since we can have
> > +                      * multiple vma associated with this object we need to
> > +                      * prevent any trampling of state; hence a copy of this
> > +                      * struct also lives in each vma, and the gtt value here
> > +                      * should only be read/written through the vma.
> > +                      */
> > +                     unsigned int gtt;
> > +             } page_sizes;
> > +
> > +             I915_SELFTEST_DECLARE(unsigned int page_mask);
> > +
> > +             struct i915_gem_object_page_iter {
> > +                     struct scatterlist *sg_pos;
> > +                     unsigned int sg_idx; /* in pages, but 32bit eek! */
> > +
> > +                     struct radix_tree_root radix;
> > +                     struct mutex lock; /* protects this cache */
> > +             } get_page;
> > +
> > +             /**
> > +              * Element within i915->mm.unbound_list or i915->mm.bound_list,
> > +              * locked by i915->mm.obj_lock.
> > +              */
> > +             struct list_head link;
> > +
> > +             /**
> > +              * Advice: are the backing pages purgeable?
> > +              */
> > +             unsigned int madv:2;
> > +
> > +             /**
> > +              * This is set if the object has been written to since the
> > +              * pages were last acquired.
> > +              */
> > +             bool dirty:1;
> > +
> > +             /**
> > +              * This is set if the object has been pinned due to unknown
> > +              * swizzling.
> > +              */
> > +             bool quirked:1;
> > +     } mm;
> > +
> > +     /** Breadcrumb of last rendering to the buffer.
> > +      * There can only be one writer, but we allow for multiple readers.
> > +      * If there is a writer, that necessarily implies that all other
> > +      * read requests are complete - but we may only be lazily clearing
> > +      * the read requests. A read request is naturally the most recent
> > +      * request on a ring, so we may have two different write and read
> > +      * requests on one ring where the write request is older than the
> > +      * read request. This allows the CPU to read from an active
> > +      * buffer by only waiting for the write to complete.
> > +      */
> > +     struct reservation_object *resv;
> > +
> > +     /** References from framebuffers, locks out tiling changes. */
> > +     unsigned int framebuffer_references;
> > +
> > +     /** Record of address bit 17 of each page at last unbind. */
> > +     unsigned long *bit_17;
> > +
> > +     union {
> > +             struct i915_gem_userptr {
> > +                     uintptr_t ptr;
> > +
> > +                     struct i915_mm_struct *mm;
> > +                     struct i915_mmu_object *mmu_object;
> > +                     struct work_struct *work;
> > +             } userptr;
> > +
> > +             unsigned long scratch;
> > +
> > +             void *gvt_info;
> > +     };
> > +
> > +     /** for phys allocated objects */
> > +     struct drm_dma_handle *phys_handle;
> > +
> > +     struct reservation_object __builtin_resv;
> > +};
> > +
> > +static inline struct drm_i915_gem_object *
> > +to_intel_bo(struct drm_gem_object *gem)
> > +{
> > +     /* Assert that to_intel_bo(NULL) == NULL */
> > +     BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
> > +
> > +     return container_of(gem, struct drm_i915_gem_object, base);
> > +}
> > +
> > +#endif
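
(Aside: the BUILD_BUG_ON in to_intel_bo() above is what makes its
"to_intel_bo(NULL) == NULL" comment true. container_of() subtracts
offsetof(struct drm_i915_gem_object, base) from the pointer, and asserting
that offset is zero means the conversion is a no-op on the pointer value,
so a NULL drm_gem_object maps back to a NULL drm_i915_gem_object. A
hypothetical illustration, not code from the patch:

    struct drm_gem_object *gem = NULL;
    struct drm_i915_gem_object *obj = to_intel_bo(gem); /* obj == NULL */

If base were ever moved out of first place in the struct, the build would
break rather than silently returning a non-NULL pointer for NULL input.)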
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > index ca8d95a5708d..ae73c6596d08 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > @@ -29,6 +29,7 @@
> >  #define I915_CMD_HASH_ORDER 9
> >  
> >  struct dma_fence;
> > +struct drm_i915_gem_object;
> >  struct drm_i915_reg_table;
> >  struct i915_gem_context;
> >  struct i915_request;
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 5ca4df9a7428..0cf87495e11b 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -80,7 +80,6 @@
> >  #include "i915_gem.h"
> >  #include "i915_gem_context.h"
> >  #include "i915_gem_fence_reg.h"
> > -#include "i915_gem_object.h"
> >  #include "i915_gem_gtt.h"
> >  #include "i915_gpu_error.h"
> >  #include "i915_request.h"
> > @@ -135,6 +134,8 @@ bool i915_error_injected(void);
> >       __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
> >                     fmt, ##__VA_ARGS__)
> >  
> > +struct drm_i915_gem_object;
> > +
> >  enum hpd_pin {
> >       HPD_NONE = 0,
> >       HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
> > diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
> > index 56947daaaf65..feeeeeaa54d8 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
> > @@ -9,6 +9,7 @@
> >  
> >  #include <linux/types.h>
> >  
> > +struct drm_i915_gem_object;
> >  struct intel_engine_cs;
> >  
> >  struct i915_gem_batch_pool {
> > @@ -19,7 +20,7 @@ struct i915_gem_batch_pool {
> >  void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
> >                             struct intel_engine_cs *engine);
> >  void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
> > -struct drm_i915_gem_object*
> > +struct drm_i915_gem_object *
> >  i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
> >  
> >  #endif /* I915_GEM_BATCH_POOL_H */
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> > index 2fafa04c45ec..593079f30fbe 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> > @@ -61,6 +61,7 @@
> >  
> >  struct drm_i915_file_private;
> >  struct drm_i915_fence_reg;
> > +struct drm_i915_gem_object;
> >  struct i915_vma;
> >  
> >  typedef u32 gen6_pte_t;
> > diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
> > index ca93a40c0c87..3666b0c5f6ca 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_object.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_object.h
> > @@ -1,308 +1,19 @@
> >  /*
> > - * Copyright © 2016 Intel Corporation
> > - *
> > - * Permission is hereby granted, free of charge, to any person obtaining a
> > - * copy of this software and associated documentation files (the "Software"),
> > - * to deal in the Software without restriction, including without limitation
> > - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > - * and/or sell copies of the Software, and to permit persons to whom the
> > - * Software is furnished to do so, subject to the following conditions:
> > - *
> > - * The above copyright notice and this permission notice (including the next
> > - * paragraph) shall be included in all copies or substantial portions of the
> > - * Software.
> > - *
> > - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> > - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> > - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> > - * IN THE SOFTWARE.
> > + * SPDX-License-Identifier: MIT
> >   *
> > + * Copyright © 2016 Intel Corporation
> >   */
> >  
> >  #ifndef __I915_GEM_OBJECT_H__
> >  #define __I915_GEM_OBJECT_H__
> >  
> > -#include <linux/reservation.h>
> > -
> > -#include <drm/drm_vma_manager.h>
> >  #include <drm/drm_gem.h>
> >  #include <drm/drm_file.h>
> >  #include <drm/drm_device.h>
> >  
> >  #include <drm/i915_drm.h>
> >  
> > -#include "i915_request.h"
> > -#include "i915_selftest.h"
> > -
> > -struct drm_i915_gem_object;
> > -
> > -/*
> > - * struct i915_lut_handle tracks the fast lookups from handle to vma used
> > - * for execbuf. Although we use a radixtree for that mapping, in order to
> > - * remove them as the object or context is closed, we need a secondary list
> > - * and a translation entry (i915_lut_handle).
> > - */
> > -struct i915_lut_handle {
> > -     struct list_head obj_link;
> > -     struct list_head ctx_link;
> > -     struct i915_gem_context *ctx;
> > -     u32 handle;
> > -};
> > -
> > -struct drm_i915_gem_object_ops {
> > -     unsigned int flags;
> > -#define I915_GEM_OBJECT_HAS_STRUCT_PAGE      BIT(0)
> > -#define I915_GEM_OBJECT_IS_SHRINKABLE        BIT(1)
> > -#define I915_GEM_OBJECT_IS_PROXY     BIT(2)
> > -#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
> > -
> > -     /* Interface between the GEM object and its backing storage.
> > -      * get_pages() is called once prior to the use of the associated set
> > -      * of pages before to binding them into the GTT, and put_pages() is
> > -      * called after we no longer need them. As we expect there to be
> > -      * associated cost with migrating pages between the backing storage
> > -      * and making them available for the GPU (e.g. clflush), we may hold
> > -      * onto the pages after they are no longer referenced by the GPU
> > -      * in case they may be used again shortly (for example migrating the
> > -      * pages to a different memory domain within the GTT). put_pages()
> > -      * will therefore most likely be called when the object itself is
> > -      * being released or under memory pressure (where we attempt to
> > -      * reap pages for the shrinker).
> > -      */
> > -     int (*get_pages)(struct drm_i915_gem_object *);
> > -     void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
> > -
> > -     int (*pwrite)(struct drm_i915_gem_object *,
> > -                   const struct drm_i915_gem_pwrite *);
> > -
> > -     int (*dmabuf_export)(struct drm_i915_gem_object *);
> > -     void (*release)(struct drm_i915_gem_object *);
> > -};
> > -
> > -struct drm_i915_gem_object {
> > -     struct drm_gem_object base;
> > -
> > -     const struct drm_i915_gem_object_ops *ops;
> > -
> > -     struct {
> > -             /**
> > -              * @vma.lock: protect the list/tree of vmas
> > -              */
> > -             spinlock_t lock;
> > -
> > -             /**
> > -              * @vma.list: List of VMAs backed by this object
> > -              *
> > -              * The VMA on this list are ordered by type, all GGTT vma are
> > -              * placed at the head and all ppGTT vma are placed at the tail.
> > -              * The different types of GGTT vma are unordered between
> > -              * themselves, use the @vma.tree (which has a defined order
> > -              * between all VMA) to quickly find an exact match.
> > -              */
> > -             struct list_head list;
> > -
> > -             /**
> > -              * @vma.tree: Ordered tree of VMAs backed by this object
> > -              *
> > -              * All VMA created for this object are placed in the @vma.tree
> > -              * for fast retrieval via a binary search in
> > -              * i915_vma_instance(). They are also added to @vma.list for
> > -              * easy iteration.
> > -              */
> > -             struct rb_root tree;
> > -     } vma;
> > -
> > -     /**
> > -      * @lut_list: List of vma lookup entries in use for this object.
> > -      *
> > -      * If this object is closed, we need to remove all of its VMA from
> > -      * the fast lookup index in associated contexts; @lut_list provides
> > -      * this translation from object to context->handles_vma.
> > -      */
> > -     struct list_head lut_list;
> > -
> > -     /** Stolen memory for this object, instead of being backed by shmem. */
> > -     struct drm_mm_node *stolen;
> > -     union {
> > -             struct rcu_head rcu;
> > -             struct llist_node freed;
> > -     };
> > -
> > -     /**
> > -      * Whether the object is currently in the GGTT mmap.
> > -      */
> > -     unsigned int userfault_count;
> > -     struct list_head userfault_link;
> > -
> > -     struct list_head batch_pool_link;
> > -     I915_SELFTEST_DECLARE(struct list_head st_link);
> > -
> > -     unsigned long flags;
> > -
> > -     /**
> > -      * Have we taken a reference for the object for incomplete GPU
> > -      * activity?
> > -      */
> > -#define I915_BO_ACTIVE_REF 0
> > -
> > -     /*
> > -      * Is the object to be mapped as read-only to the GPU
> > -      * Only honoured if hardware has relevant pte bit
> > -      */
> > -     unsigned int cache_level:3;
> > -     unsigned int cache_coherent:2;
> > -#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
> > -#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
> > -     unsigned int cache_dirty:1;
> > -
> > -     /**
> > -      * @read_domains: Read memory domains.
> > -      *
> > -      * These monitor which caches contain read/write data related to the
> > -      * object. When transitioning from one set of domains to another,
> > -      * the driver is called to ensure that caches are suitably flushed and
> > -      * invalidated.
> > -      */
> > -     u16 read_domains;
> > -
> > -     /**
> > -      * @write_domain: Corresponding unique write memory domain.
> > -      */
> > -     u16 write_domain;
> > -
> > -     atomic_t frontbuffer_bits;
> > -     unsigned int frontbuffer_ggtt_origin; /* write once */
> > -     struct i915_active_request frontbuffer_write;
> > -
> > -     /** Current tiling stride for the object, if it's tiled. */
> > -     unsigned int tiling_and_stride;
> > -#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
> > -#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
> > -#define STRIDE_MASK (~TILING_MASK)
> > -
> > -     /** Count of VMA actually bound by this object */
> > -     unsigned int bind_count;
> > -     unsigned int active_count;
> > -     /** Count of how many global VMA are currently pinned for use by HW */
> > -     unsigned int pin_global;
> > -
> > -     struct {
> > -             struct mutex lock; /* protects the pages and their use */
> > -             atomic_t pages_pin_count;
> > -
> > -             struct sg_table *pages;
> > -             void *mapping;
> > -
> > -             /* TODO: whack some of this into the error state */
> > -             struct i915_page_sizes {
> > -                     /**
> > -                      * The sg mask of the pages sg_table. i.e the mask of
> > -                      * of the lengths for each sg entry.
> > -                      */
> > -                     unsigned int phys;
> > -
> > -                     /**
> > -                      * The gtt page sizes we are allowed to use given the
> > -                      * sg mask and the supported page sizes. This will
> > -                      * express the smallest unit we can use for the whole
> > -                      * object, as well as the larger sizes we may be able
> > -                      * to use opportunistically.
> > -                      */
> > -                     unsigned int sg;
> > -
> > -                     /**
> > -                      * The actual gtt page size usage. Since we can have
> > -                      * multiple vma associated with this object we need to
> > -                      * prevent any trampling of state, hence a copy of this
> > -                      * struct also lives in each vma, therefore the gtt
> > -                      * value here should only be read/write through the vma.
> > -                      */
> > -                     unsigned int gtt;
> > -             } page_sizes;
> > -
> > -             I915_SELFTEST_DECLARE(unsigned int page_mask);
> > -
> > -             struct i915_gem_object_page_iter {
> > -                     struct scatterlist *sg_pos;
> > -                     unsigned int sg_idx; /* in pages, but 32bit eek! */
> > -
> > -                     struct radix_tree_root radix;
> > -                     struct mutex lock; /* protects this cache */
> > -             } get_page;
> > -
> > -             /**
> > -              * Element within i915->mm.unbound_list or i915->mm.bound_list,
> > -              * locked by i915->mm.obj_lock.
> > -              */
> > -             struct list_head link;
> > -
> > -             /**
> > -              * Advice: are the backing pages purgeable?
> > -              */
> > -             unsigned int madv:2;
> > -
> > -             /**
> > -              * This is set if the object has been written to since the
> > -              * pages were last acquired.
> > -              */
> > -             bool dirty:1;
> > -
> > -             /**
> > -              * This is set if the object has been pinned due to unknown
> > -              * swizzling.
> > -              */
> > -             bool quirked:1;
> > -     } mm;
> > -
> > -     /** Breadcrumb of last rendering to the buffer.
> > -      * There can only be one writer, but we allow for multiple readers.
> > -      * If there is a writer that necessarily implies that all other
> > -      * read requests are complete - but we may only be lazily clearing
> > -      * the read requests. A read request is naturally the most recent
> > -      * request on a ring, so we may have two different write and read
> > -      * requests on one ring where the write request is older than the
> > -      * read request. This allows for the CPU to read from an active
> > -      * buffer by only waiting for the write to complete.
> > -      */
> > -     struct reservation_object *resv;
> > -
> > -     /** References from framebuffers, locks out tiling changes. */
> > -     unsigned int framebuffer_references;
> > -
> > -     /** Record of address bit 17 of each page at last unbind. */
> > -     unsigned long *bit_17;
> > -
> > -     union {
> > -             struct i915_gem_userptr {
> > -                     uintptr_t ptr;
> > -
> > -                     struct i915_mm_struct *mm;
> > -                     struct i915_mmu_object *mmu_object;
> > -                     struct work_struct *work;
> > -             } userptr;
> > -
> > -             unsigned long scratch;
> > -
> > -             void *gvt_info;
> > -     };
> > -
> > -     /** for phys allocated objects */
> > -     struct drm_dma_handle *phys_handle;
> > -
> > -     struct reservation_object __builtin_resv;
> > -};
> > -
> > -static inline struct drm_i915_gem_object *
> > -to_intel_bo(struct drm_gem_object *gem)
> > -{
> > -     /* Assert that to_intel_bo(NULL) == NULL */
> > -     BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
> > -
> > -     return container_of(gem, struct drm_i915_gem_object, base);
> > -}
> > +#include "gem/i915_gem_object_types.h"
> >  
> >  struct drm_i915_gem_object *i915_gem_object_alloc(void);
> >  void i915_gem_object_free(struct drm_i915_gem_object *obj);
> 
> -- 
> Jani Nikula, Intel Open Source Graphics Center

