[radeon-alex:drm-next-5.2-wip 19/42] drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:87:20: error: field 'mirror' has incomplete type

kbuild test robot <lkp@intel.com>
Wed Apr 3 18:26:17 UTC 2019


tree:   git://people.freedesktop.org/~agd5f/linux.git drm-next-5.2-wip
head:   5666aea3ea494d4dd96df8f092cab32dbeeac321
commit: ba5896bd6a1a175b21609c6c81dea9813cbf296c [19/42] drm/amdgpu: use HMM callback to replace mmu notifier
config: xtensa-allyesconfig (attached as .config)
compiler: xtensa-linux-gcc (GCC) 8.1.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout ba5896bd6a1a175b21609c6c81dea9813cbf296c
        # save the attached .config to linux build tree
        GCC_VERSION=8.1.0 make.cross ARCH=xtensa 
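
(If reproducing from the list archive, the config arrives as a gzipped attachment; assuming it was saved as .config.gz and the tree was checked out to ~/linux — both names are placeholders, not from this report — one way to stage it before the make.cross step is:)

        gzip -d .config.gz              # unpack to a plain .config
        cp .config ~/linux/.config      # place it at the root of the build tree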

Note: the radeon-alex/drm-next-5.2-wip HEAD 5666aea3ea494d4dd96df8f092cab32dbeeac321 builds fine.
      The failing commit only hurts bisectability.
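
For anyone bisecting across this range before the series is reworked, the standard way to step over a commit that does not build is git bisect skip (generic git usage; the good/bad endpoints below are placeholders, not taken from this report):

        git bisect start <bad-commit> <good-commit>
        # ... when the build breaks at ba5896bd6a1a ("drm/amdgpu: use HMM callback to replace mmu notifier"):
        git bisect skip     # let bisect pick a nearby commit to test instead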

All error/warnings (new ones prefixed by >>):

>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:87:20: error: field 'mirror' has incomplete type
     struct hmm_mirror mirror;
                       ^~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: In function 'amdgpu_mn_destroy':
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:131:2: error: implicit declaration of function 'hmm_mirror_unregister'; did you mean 'drm_dp_aux_unregister'? [-Werror=implicit-function-declaration]
     hmm_mirror_unregister(&amn->mirror);
     ^~~~~~~~~~~~~~~~~~~~~
     drm_dp_aux_unregister
   In file included from include/linux/firmware.h:6,
                    from drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:46:
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: In function 'amdgpu_hmm_mirror_release':
>> include/linux/kernel.h:979:32: error: dereferencing pointer to incomplete type 'struct hmm_mirror'
     BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
                                   ^~~~~~
   include/linux/compiler.h:324:9: note: in definition of macro '__compiletime_assert'
      if (!(condition))     \
            ^~~~~~~~~
   include/linux/compiler.h:344:2: note: in expansion of macro '_compiletime_assert'
     _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
     ^~~~~~~~~~~~~~~~~~~
   include/linux/build_bug.h:39:37: note: in expansion of macro 'compiletime_assert'
    #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
                                        ^~~~~~~~~~~~~~~~~~
   include/linux/kernel.h:979:2: note: in expansion of macro 'BUILD_BUG_ON_MSG'
     BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
     ^~~~~~~~~~~~~~~~
   include/linux/kernel.h:979:20: note: in expansion of macro '__same_type'
     BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
                       ^~~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:144:26: note: in expansion of macro 'container_of'
     struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
                             ^~~~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: At top level:
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:238:17: warning: 'struct hmm_update' declared inside parameter list will not be visible outside of this definition or declaration
       const struct hmm_update *update)
                    ^~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: In function 'amdgpu_mn_sync_pagetables_gfx':
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:241:30: error: dereferencing pointer to incomplete type 'const struct hmm_update'
     unsigned long start = update->start;
                                 ^~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: At top level:
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:286:17: warning: 'struct hmm_update' declared inside parameter list will not be visible outside of this definition or declaration
       const struct hmm_update *update)
                    ^~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: In function 'amdgpu_mn_sync_pagetables_hsa':
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:289:30: error: dereferencing pointer to incomplete type 'const struct hmm_update'
     unsigned long start = update->start;
                                 ^~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: At top level:
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:333:30: error: array type has incomplete element type 'struct hmm_mirror_ops'
    static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
                                 ^~~~~~~~~~~~~~~~~~~~~
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:334:3: error: array index in non-array initializer
     [AMDGPU_MN_TYPE_GFX] = {
      ^~~~~~~~~~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:334:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:335:3: error: field name not in record or union initializer
      .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
      ^
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:335:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:336:3: error: field name not in record or union initializer
      .release = amdgpu_hmm_mirror_release
      ^
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:336:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:338:3: error: array index in non-array initializer
     [AMDGPU_MN_TYPE_HSA] = {
      ^~~~~~~~~~~~~~~~~~
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:338:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:339:3: error: field name not in record or union initializer
      .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
      ^
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:339:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:340:3: error: field name not in record or union initializer
      .release = amdgpu_hmm_mirror_release
      ^
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:340:3: note: (near initialization for 'amdgpu_hmm_mirror_ops')
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c: In function 'amdgpu_mn_get':
>> drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:383:6: error: implicit declaration of function 'hmm_mirror_register'; did you mean 'drm_dp_aux_register'? [-Werror=implicit-function-declaration]
     r = hmm_mirror_register(&amn->mirror, mm);
         ^~~~~~~~~~~~~~~~~~~
         drm_dp_aux_register
   At top level:
   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c:333:30: warning: 'amdgpu_hmm_mirror_ops' defined but not used [-Wunused-variable]
    static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
                                 ^~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
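
All of the above cascades from one root cause: struct hmm_mirror, struct hmm_update and struct hmm_mirror_ops are never fully defined for this build, presumably because CONFIG_HMM_MIRROR cannot be enabled in xtensa-allyesconfig, so every embedded field, member access and ops table that needs their layout fails. A minimal sketch of that failure mode (hypothetical types, deliberately not amdgpu code, expected to reproduce the same diagnostics):

        /* Only a forward declaration is in scope: the tag is known, the layout is not. */
        struct mirror;

        struct owner {
                int cookie;
                struct mirror m;        /* error: field 'm' has incomplete type */
        };

        void owner_release(struct mirror *m);   /* fine: a pointer needs no layout */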

vim +/mirror +87 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

    55	
    56	/**
    57	 * struct amdgpu_mn
    58	 *
    59	 * @adev: amdgpu device pointer
    60	 * @mm: process address space
    61	 * @type: type of MMU notifier
    62	 * @work: destruction work item
    63	 * @node: hash table node to find structure by adev and mn
    64	 * @lock: rw semaphore protecting the notifier nodes
    65	 * @objects: interval tree containing amdgpu_mn_nodes
    66	 * @mirror: HMM mirror function support
    67	 *
    68	 * Data for each amdgpu device and process address space.
    69	 */
    70	struct amdgpu_mn {
    71		/* constant after initialisation */
    72		struct amdgpu_device	*adev;
    73		struct mm_struct	*mm;
    74		enum amdgpu_mn_type	type;
    75	
    76		/* only used on destruction */
    77		struct work_struct	work;
    78	
    79		/* protected by adev->mn_lock */
    80		struct hlist_node	node;
    81	
    82		/* objects protected by lock */
    83		struct rw_semaphore	lock;
    84		struct rb_root_cached	objects;
    85	
    86		/* HMM mirror */
  > 87		struct hmm_mirror	mirror;
    88	};
    89	
    90	/**
    91	 * struct amdgpu_mn_node
    92	 *
    93	 * @it: interval node defining start-last of the affected address range
    94	 * @bos: list of all BOs in the affected address range
    95	 *
     96	 * Manages all BOs which are affected by a certain range of address space.
    97	 */
    98	struct amdgpu_mn_node {
    99		struct interval_tree_node	it;
   100		struct list_head		bos;
   101	};
   102	
   103	/**
   104	 * amdgpu_mn_destroy - destroy the HMM mirror
   105	 *
    106	 * @work: previously scheduled work item
    107	 *
    108	 * Lazily destroys the notifier from a work item
   109	 */
   110	static void amdgpu_mn_destroy(struct work_struct *work)
   111	{
   112		struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
   113		struct amdgpu_device *adev = amn->adev;
   114		struct amdgpu_mn_node *node, *next_node;
   115		struct amdgpu_bo *bo, *next_bo;
   116	
   117		mutex_lock(&adev->mn_lock);
   118		down_write(&amn->lock);
   119		hash_del(&amn->node);
   120		rbtree_postorder_for_each_entry_safe(node, next_node,
   121						     &amn->objects.rb_root, it.rb) {
   122			list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
   123				bo->mn = NULL;
   124				list_del_init(&bo->mn_list);
   125			}
   126			kfree(node);
   127		}
   128		up_write(&amn->lock);
   129		mutex_unlock(&adev->mn_lock);
   130	
 > 131		hmm_mirror_unregister(&amn->mirror);
   132		kfree(amn);
   133	}
   134	
   135	/**
   136	 * amdgpu_hmm_mirror_release - callback to notify about mm destruction
   137	 *
   138	 * @mirror: the HMM mirror (mm) this callback is about
   139	 *
    140	 * Schedule a work item to lazily destroy the HMM mirror.
   141	 */
   142	static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
   143	{
 > 144		struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
   145	
   146		INIT_WORK(&amn->work, amdgpu_mn_destroy);
   147		schedule_work(&amn->work);
   148	}
   149	
   150	/**
   151	 * amdgpu_mn_lock - take the write side lock for this notifier
   152	 *
   153	 * @mn: our notifier
   154	 */
   155	void amdgpu_mn_lock(struct amdgpu_mn *mn)
   156	{
   157		if (mn)
   158			down_write(&mn->lock);
   159	}
   160	
   161	/**
   162	 * amdgpu_mn_unlock - drop the write side lock for this notifier
   163	 *
   164	 * @mn: our notifier
   165	 */
   166	void amdgpu_mn_unlock(struct amdgpu_mn *mn)
   167	{
   168		if (mn)
   169			up_write(&mn->lock);
   170	}
   171	
   172	/**
   173	 * amdgpu_mn_read_lock - take the read side lock for this notifier
   174	 *
   175	 * @amn: our notifier
   176	 */
   177	static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
   178	{
   179		if (blockable)
   180			down_read(&amn->lock);
   181		else if (!down_read_trylock(&amn->lock))
   182			return -EAGAIN;
   183	
   184		return 0;
   185	}
   186	
   187	/**
   188	 * amdgpu_mn_read_unlock - drop the read side lock for this notifier
   189	 *
   190	 * @amn: our notifier
   191	 */
   192	static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
   193	{
   194		up_read(&amn->lock);
   195	}
   196	
   197	/**
   198	 * amdgpu_mn_invalidate_node - unmap all BOs of a node
   199	 *
   200	 * @node: the node with the BOs to unmap
   201	 * @start: start of address range affected
   202	 * @end: end of address range affected
   203	 *
   204	 * Block for operations on BOs to finish and mark pages as accessed and
   205	 * potentially dirty.
   206	 */
   207	static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
   208					      unsigned long start,
   209					      unsigned long end)
   210	{
   211		struct amdgpu_bo *bo;
   212		long r;
   213	
   214		list_for_each_entry(bo, &node->bos, mn_list) {
   215	
   216			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
   217				continue;
   218	
   219			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
   220				true, false, MAX_SCHEDULE_TIMEOUT);
   221			if (r <= 0)
   222				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
   223	
   224			amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
   225		}
   226	}
   227	
   228	/**
   229	 * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
   230	 *
   231	 * @mirror: the hmm_mirror (mm) is about to update
   232	 * @update: the update start, end address
   233	 *
   234	 * Block for operations on BOs to finish and mark pages as accessed and
   235	 * potentially dirty.
   236	 */
   237	static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
 > 238				const struct hmm_update *update)
   239	{
   240		struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 > 241		unsigned long start = update->start;
   242		unsigned long end = update->end;
   243		bool blockable = update->blockable;
   244		struct interval_tree_node *it;
   245	
   246		/* notification is exclusive, but interval is inclusive */
   247		end -= 1;
   248	
   249		/* TODO we should be able to split locking for interval tree and
   250		 * amdgpu_mn_invalidate_node
   251		 */
   252		if (amdgpu_mn_read_lock(amn, blockable))
   253			return -EAGAIN;
   254	
   255		it = interval_tree_iter_first(&amn->objects, start, end);
   256		while (it) {
   257			struct amdgpu_mn_node *node;
   258	
   259			if (!blockable) {
   260				amdgpu_mn_read_unlock(amn);
   261				return -EAGAIN;
   262			}
   263	
   264			node = container_of(it, struct amdgpu_mn_node, it);
   265			it = interval_tree_iter_next(it, start, end);
   266	
   267			amdgpu_mn_invalidate_node(node, start, end);
   268		}
   269	
   270		amdgpu_mn_read_unlock(amn);
   271	
   272		return 0;
   273	}
   274	
   275	/**
   276	 * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
   277	 *
   278	 * @mirror: the hmm_mirror (mm) is about to update
   279	 * @update: the update start, end address
   280	 *
   281	 * We temporarily evict all BOs between start and end. This
   282	 * necessitates evicting all user-mode queues of the process. The BOs
    283	 * are restored in amdgpu_mn_invalidate_range_end_hsa.
   284	 */
   285	static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
 > 286				const struct hmm_update *update)
   287	{
   288		struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
   289		unsigned long start = update->start;
   290		unsigned long end = update->end;
   291		bool blockable = update->blockable;
   292		struct interval_tree_node *it;
   293	
   294		/* notification is exclusive, but interval is inclusive */
   295		end -= 1;
   296	
   297		if (amdgpu_mn_read_lock(amn, blockable))
   298			return -EAGAIN;
   299	
   300		it = interval_tree_iter_first(&amn->objects, start, end);
   301		while (it) {
   302			struct amdgpu_mn_node *node;
   303			struct amdgpu_bo *bo;
   304	
   305			if (!blockable) {
   306				amdgpu_mn_read_unlock(amn);
   307				return -EAGAIN;
   308			}
   309	
   310			node = container_of(it, struct amdgpu_mn_node, it);
   311			it = interval_tree_iter_next(it, start, end);
   312	
   313			list_for_each_entry(bo, &node->bos, mn_list) {
   314				struct kgd_mem *mem = bo->kfd_bo;
   315	
   316				if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
   317								 start, end))
   318					amdgpu_amdkfd_evict_userptr(mem, amn->mm);
   319			}
   320		}
   321	
   322		amdgpu_mn_read_unlock(amn);
   323	
   324		return 0;
   325	}
   326	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

