[PATCH] drm/xe: Document nested struct members according to guidelines

Thomas Hellström thomas.hellstrom at linux.intel.com
Tue Jan 23 12:08:40 UTC 2024


Document nested struct members with full names as described in
Documentation/doc-guide/kernel-doc.rst.

For this documentation we allow a wider column width to make it more
readable.

This fixes warnings similar to:
drivers/gpu/drm/xe/xe_lrc_types.h:45: warning: Excess struct member 'size' description in 'xe_lrc'
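
For reference, a minimal illustration of the naming scheme (example
only, not taken from the driver): kernel-doc only matches an inline
comment on a member of a nested struct when the full path from the
outer member is spelled out.

	struct foo {
		/** @bar: bar state */
		struct {
			/* "@baz: ..." here would give the warning above */
			/** @bar.baz: nested member of @bar */
			int baz;
		} bar;
	};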

Cc: Lucas De Marchi <lucas.demarchi at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/xe/xe_device_types.h     | 161 ++++++++++++-----------
 drivers/gpu/drm/xe/xe_exec_queue_types.h |  36 ++---
 drivers/gpu/drm/xe/xe_gsc_types.h        |  16 +--
 drivers/gpu/drm/xe/xe_gt_types.h         |  99 +++++++-------
 drivers/gpu/drm/xe/xe_guc_ct_types.h     |   4 +-
 drivers/gpu/drm/xe/xe_guc_submit_types.h |  18 +--
 drivers/gpu/drm/xe/xe_guc_types.h        |  28 ++--
 drivers/gpu/drm/xe/xe_hw_engine_types.h  |  70 +++++-----
 drivers/gpu/drm/xe/xe_lrc_types.h        |   6 +-
 drivers/gpu/drm/xe/xe_sched_job_types.h  |   6 +-
 drivers/gpu/drm/xe/xe_uc_fw_types.h      |   9 +-
 drivers/gpu/drm/xe/xe_wopcm_types.h      |   4 +-
 12 files changed, 233 insertions(+), 224 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 7eda86bd4c2a..dedb163135c8 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -143,10 +143,10 @@ struct xe_tile {
 	 * * 8MB-16MB: global GTT
 	 */
 	struct {
-		/** @size: size of tile's MMIO space */
+		/** @mmio.size: size of tile's MMIO space */
 		size_t size;
 
-		/** @regs: pointer to tile's MMIO space (starting with registers) */
+		/** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
 		void __iomem *regs;
 	} mmio;
 
@@ -156,31 +156,31 @@ struct xe_tile {
 	 * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
 	 */
 	struct {
-		/** @size: size of tile's additional MMIO-extension space */
+		/** @mmio_ext.size: size of tile's additional MMIO-extension space */
 		size_t size;
 
-		/** @regs: pointer to tile's additional MMIO-extension space */
+		/** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
 		void __iomem *regs;
 	} mmio_ext;
 
 	/** @mem: memory management info for tile */
 	struct {
 		/**
-		 * @vram: VRAM info for tile.
+		 * @mem.vram: VRAM info for tile.
 		 *
 		 * Although VRAM is associated with a specific tile, it can
 		 * still be accessed by all tiles' GTs.
 		 */
 		struct xe_mem_region vram;
 
-		/** @vram_mgr: VRAM TTM manager */
+		/** @mem.vram_mgr: VRAM TTM manager */
 		struct xe_ttm_vram_mgr *vram_mgr;
 
-		/** @ggtt: Global graphics translation table */
+		/** @mem.ggtt: Global graphics translation table */
 		struct xe_ggtt *ggtt;
 
 		/**
-		 * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
 		 *
 		 * Media GT shares a pool with its primary GT.
 		 */
@@ -218,68 +218,68 @@ struct xe_device {
 
 	/** @info: device info */
 	struct intel_device_info {
-		/** @graphics_name: graphics IP name */
+		/** @info.graphics_name: graphics IP name */
 		const char *graphics_name;
-		/** @media_name: media IP name */
+		/** @info.media_name: media IP name */
 		const char *media_name;
-		/** @tile_mmio_ext_size: size of MMIO extension space, per-tile */
+		/** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
 		u32 tile_mmio_ext_size;
-		/** @graphics_verx100: graphics IP version */
+		/** @info.graphics_verx100: graphics IP version */
 		u32 graphics_verx100;
-		/** @media_verx100: media IP version */
+		/** @info.media_verx100: media IP version */
 		u32 media_verx100;
-		/** @mem_region_mask: mask of valid memory regions */
+		/** @info.mem_region_mask: mask of valid memory regions */
 		u32 mem_region_mask;
-		/** @platform: XE platform enum */
+		/** @info.platform: XE platform enum */
 		enum xe_platform platform;
-		/** @subplatform: XE subplatform enum */
+		/** @info.subplatform: XE subplatform enum */
 		enum xe_subplatform subplatform;
-		/** @devid: device ID */
+		/** @info.devid: device ID */
 		u16 devid;
-		/** @revid: device revision */
+		/** @info.revid: device revision */
 		u8 revid;
-		/** @step: stepping information for each IP */
+		/** @info.step: stepping information for each IP */
 		struct xe_step_info step;
-		/** @dma_mask_size: DMA address bits */
+		/** @info.dma_mask_size: DMA address bits */
 		u8 dma_mask_size;
-		/** @vram_flags: Vram flags */
+		/** @info.vram_flags: Vram flags */
 		u8 vram_flags;
-		/** @tile_count: Number of tiles */
+		/** @info.tile_count: Number of tiles */
 		u8 tile_count;
-		/** @gt_count: Total number of GTs for entire device */
+		/** @info.gt_count: Total number of GTs for entire device */
 		u8 gt_count;
-		/** @vm_max_level: Max VM level */
+		/** @info.vm_max_level: Max VM level */
 		u8 vm_max_level;
-		/** @va_bits: Maximum bits of a virtual address */
+		/** @info.va_bits: Maximum bits of a virtual address */
 		u8 va_bits;
 
-		/** @is_dgfx: is discrete device */
+		/** @info.is_dgfx: is discrete device */
 		u8 is_dgfx:1;
-		/** @has_asid: Has address space ID */
+		/** @info.has_asid: Has address space ID */
 		u8 has_asid:1;
-		/** @force_execlist: Forced execlist submission */
+		/** @info.force_execlist: Forced execlist submission */
 		u8 force_execlist:1;
-		/** @has_flat_ccs: Whether flat CCS metadata is used */
+		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
 		u8 has_flat_ccs:1;
-		/** @has_llc: Device has a shared CPU+GPU last level cache */
+		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
 		u8 has_llc:1;
-		/** @has_mmio_ext: Device has extra MMIO address range */
+		/** @info.has_mmio_ext: Device has extra MMIO address range */
 		u8 has_mmio_ext:1;
-		/** @has_range_tlb_invalidation: Has range based TLB invalidations */
+		/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
 		u8 has_range_tlb_invalidation:1;
-		/** @has_sriov: Supports SR-IOV */
+		/** @info.has_sriov: Supports SR-IOV */
 		u8 has_sriov:1;
-		/** @has_usm: Device has unified shared memory support */
+		/** @info.has_usm: Device has unified shared memory support */
 		u8 has_usm:1;
-		/** @enable_display: display enabled */
+		/** @info.enable_display: display enabled */
 		u8 enable_display:1;
-		/** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
+		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
 		u8 skip_mtcfg:1;
-		/** @skip_pcode: skip access to PCODE uC */
+		/** @info.skip_pcode: skip access to PCODE uC */
 		u8 skip_pcode:1;
-		/** @has_heci_gscfi: device has heci gscfi */
+		/** @info.has_heci_gscfi: device has heci gscfi */
 		u8 has_heci_gscfi:1;
-		/** @skip_guc_pc: Skip GuC based PM feature init */
+		/** @info.skip_guc_pc: Skip GuC based PM feature init */
 		u8 skip_guc_pc:1;
 
 #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -291,10 +291,10 @@ struct xe_device {
 
 	/** @irq: device interrupt state */
 	struct {
-		/** @lock: lock for processing irq's on this device */
+		/** @irq.lock: lock for processing irq's on this device */
 		spinlock_t lock;
 
-		/** @enabled: interrupts enabled on this device */
+		/** @irq.enabled: interrupts enabled on this device */
 		bool enabled;
 	} irq;
 
@@ -303,17 +303,17 @@ struct xe_device {
 
 	/** @mmio: mmio info for device */
 	struct {
-		/** @size: size of MMIO space for device */
+		/** @mmio.size: size of MMIO space for device */
 		size_t size;
-		/** @regs: pointer to MMIO space for device */
+		/** @mmio.regs: pointer to MMIO space for device */
 		void __iomem *regs;
 	} mmio;
 
 	/** @mem: memory info for device */
 	struct {
-		/** @vram: VRAM info for device */
+		/** @mem.vram: VRAM info for device */
 		struct xe_mem_region vram;
-		/** @sys_mgr: system TTM manager */
+		/** @mem.sys_mgr: system TTM manager */
 		struct ttm_resource_manager sys_mgr;
 	} mem;
 
@@ -327,44 +327,44 @@ struct xe_device {
 
 	/** @clients: drm clients info */
 	struct {
-		/** @lock: Protects drm clients info */
+		/** @clients.lock: Protects drm clients info */
 		spinlock_t lock;
 
-		/** @count: number of drm clients */
+		/** @clients.count: number of drm clients */
 		u64 count;
 	} clients;
 
 	/** @usm: unified memory state */
 	struct {
-		/** @asid: convert a ASID to VM */
+		/** @usm.asid_to_vm: convert an ASID to VM */
 		struct xarray asid_to_vm;
-		/** @next_asid: next ASID, used to cyclical alloc asids */
+		/** @usm.next_asid: next ASID, used to cyclically allocate ASIDs */
 		u32 next_asid;
-		/** @num_vm_in_fault_mode: number of VM in fault mode */
+		/** @usm.num_vm_in_fault_mode: number of VM in fault mode */
 		u32 num_vm_in_fault_mode;
-		/** @num_vm_in_non_fault_mode: number of VM in non-fault mode */
+		/** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
 		u32 num_vm_in_non_fault_mode;
-		/** @lock: protects UM state */
+		/** @usm.lock: protects UM state */
 		struct mutex lock;
 	} usm;
 
 	/** @persistent_engines: engines that are closed but still running */
 	struct {
-		/** @lock: protects persistent engines */
+		/** @persistent_engines.lock: protects persistent engines */
 		struct mutex lock;
-		/** @list: list of persistent engines */
+		/** @persistent_engines.list: list of persistent engines */
 		struct list_head list;
 	} persistent_engines;
 
 	/** @pinned: pinned BO state */
 	struct {
-		/** @lock: protected pinned BO list state */
+		/** @pinned.lock: protects pinned BO list state */
 		spinlock_t lock;
-		/** @evicted: pinned kernel BO that are present */
+		/** @pinned.kernel_bo_present: pinned kernel BO that are present */
 		struct list_head kernel_bo_present;
-		/** @evicted: pinned BO that have been evicted */
+		/** @pinned.evicted: pinned BO that have been evicted */
 		struct list_head evicted;
-		/** @external_vram: pinned external BO in vram*/
+		/** @pinned.external_vram: pinned external BO in vram */
 		struct list_head external_vram;
 	} pinned;
 
@@ -385,21 +385,26 @@ struct xe_device {
 	 * triggering additional actions when they occur.
 	 */
 	struct {
-		/** @ref: ref count of memory accesses */
+		/** @mem_access.ref: ref count of memory accesses */
 		atomic_t ref;
 
-		/** @vram_userfault: Encapsulate vram_userfault related stuff */
+		/**
+		 * @mem_access.vram_userfault: Encapsulate vram_userfault
+		 * related stuff
+		 */
 		struct {
 			/**
-			 * @lock: Protects access to @vram_usefault.list
-			 * Using mutex instead of spinlock as lock is applied to entire
-			 * list operation which may sleep
+			 * @mem_access.vram_userfault.lock: Protects access to
+			 * @mem_access.vram_userfault.list. Using mutex instead
+			 * of spinlock as lock is applied to entire list
+			 * operation which may sleep.
 			 */
 			struct mutex lock;
 
 			/**
-			 * @list: Keep list of userfaulted vram bo, which require to release their
-			 * mmap mappings at runtime suspend path
+			 * @mem_access.vram_userfault.list: List of userfaulted
+			 * vram bo, whose mmap mappings need to be released in
+			 * the runtime suspend path.
 			 */
 			struct list_head list;
 		} vram_userfault;
@@ -409,28 +414,28 @@ struct xe_device {
 	 * @pat: Encapsulate PAT related stuff
 	 */
 	struct {
-		/** Internal operations to abstract platforms */
+		/** @pat.ops: Internal operations to abstract platforms */
 		const struct xe_pat_ops *ops;
-		/** PAT table to program in the HW */
+		/** @pat.table: PAT table to program in the HW */
 		const struct xe_pat_table_entry *table;
-		/** Number of PAT entries */
+		/** @pat.n_entries: Number of PAT entries */
 		int n_entries;
 		u32 idx[__XE_CACHE_LEVEL_COUNT];
 	} pat;
 
 	/** @d3cold: Encapsulate d3cold related stuff */
 	struct {
-		/** capable: Indicates if root port is d3cold capable */
+		/** @d3cold.capable: Indicates if root port is d3cold capable */
 		bool capable;
 
-		/** @allowed: Indicates if d3cold is a valid device state */
+		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
 		bool allowed;
 
-		/** @power_lost: Indicates if card has really lost power. */
+		/** @d3cold.power_lost: Indicates if card has really lost power. */
 		bool power_lost;
 
 		/**
-		 * @vram_threshold:
+		 * @d3cold.vram_threshold:
 		 *
 		 * This represents the permissible threshold(in megabytes)
 		 * for vram save/restore. d3cold will be disallowed,
@@ -439,7 +444,7 @@ struct xe_device {
 		 * Default threshold value is 300mb.
 		 */
 		u32 vram_threshold;
-		/** @lock: protect vram_threshold */
+		/** @d3cold.lock: protect vram_threshold */
 		struct mutex lock;
 	} d3cold;
 
@@ -547,17 +552,17 @@ struct xe_file {
 
 	/** @vm: VM state for file */
 	struct {
-		/** @xe: xarray to store VMs */
+		/** @vm.xa: xarray to store VMs */
 		struct xarray xa;
-		/** @lock: protects file VM state */
+		/** @vm.lock: protects file VM state */
 		struct mutex lock;
 	} vm;
 
 	/** @exec_queue: Submission exec queue state for file */
 	struct {
-		/** @xe: xarray to store engines */
+		/** @exec_queue.xa: xarray to store engines */
 		struct xarray xa;
-		/** @lock: protects file engine state */
+		/** @exec_queue.lock: protects file engine state */
 		struct mutex lock;
 	} exec_queue;
 
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index e7f84dee5275..648391961fc4 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -109,9 +109,9 @@ struct xe_exec_queue {
 	 * @persistent: persistent exec queue state
 	 */
 	struct {
-		/** @xef: file which this exec queue belongs to */
+		/** @persistent.xef: file which this exec queue belongs to */
 		struct xe_file *xef;
-		/** @link: link in list of persistent exec queues */
+		/** @persistent.link: link in list of persistent exec queues */
 		struct list_head link;
 	} persistent;
 
@@ -120,55 +120,55 @@ struct xe_exec_queue {
 		 * @parallel: parallel submission state
 		 */
 		struct {
-			/** @composite_fence_ctx: context composite fence */
+			/** @parallel.composite_fence_ctx: context composite fence */
 			u64 composite_fence_ctx;
-			/** @composite_fence_seqno: seqno for composite fence */
+			/** @parallel.composite_fence_seqno: seqno for composite fence */
 			u32 composite_fence_seqno;
 		} parallel;
 		/**
 		 * @bind: bind submission state
 		 */
 		struct {
-			/** @fence_ctx: context bind fence */
+			/** @bind.fence_ctx: context bind fence */
 			u64 fence_ctx;
-			/** @fence_seqno: seqno for bind fence */
+			/** @bind.fence_seqno: seqno for bind fence */
 			u32 fence_seqno;
 		} bind;
 	};
 
 	/** @sched_props: scheduling properties */
 	struct {
-		/** @timeslice_us: timeslice period in micro-seconds */
+		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
 		u32 timeslice_us;
-		/** @preempt_timeout_us: preemption timeout in micro-seconds */
+		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
 		u32 preempt_timeout_us;
-		/** @job_timeout_ms: job timeout in milliseconds */
+		/** @sched_props.job_timeout_ms: job timeout in milliseconds */
 		u32 job_timeout_ms;
-		/** @priority: priority of this exec queue */
+		/** @sched_props.priority: priority of this exec queue */
 		enum xe_exec_queue_priority priority;
 	} sched_props;
 
 	/** @compute: compute exec queue state */
 	struct {
-		/** @pfence: preemption fence */
+		/** @compute.pfence: preemption fence */
 		struct dma_fence *pfence;
-		/** @context: preemption fence context */
+		/** @compute.context: preemption fence context */
 		u64 context;
-		/** @seqno: preemption fence seqno */
+		/** @compute.seqno: preemption fence seqno */
 		u32 seqno;
-		/** @link: link into VM's list of exec queues */
+		/** @compute.link: link into VM's list of exec queues */
 		struct list_head link;
-		/** @lock: preemption fences lock */
+		/** @compute.lock: preemption fences lock */
 		spinlock_t lock;
 	} compute;
 
 	/** @usm: unified shared memory state */
 	struct {
-		/** @acc_trigger: access counter trigger */
+		/** @usm.acc_trigger: access counter trigger */
 		u32 acc_trigger;
-		/** @acc_notify: access counter notify */
+		/** @usm.acc_notify: access counter notify */
 		u32 acc_notify;
-		/** @acc_granularity: access counter granularity */
+		/** @usm.acc_granularity: access counter granularity */
 		u32 acc_granularity;
 	} usm;
 
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 060d0fe848ad..138d8cc0f19c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -50,21 +50,21 @@ struct xe_gsc {
 
 	/** @proxy: sub-structure containing the SW proxy-related variables */
 	struct {
-		/** @component: struct for communication with mei component */
+		/** @proxy.component: struct for communication with mei component */
 		struct i915_gsc_proxy_component *component;
-		/** @mutex: protects the component binding and usage */
+		/** @proxy.mutex: protects the component binding and usage */
 		struct mutex mutex;
-		/** @component_added: whether the component has been added */
+		/** @proxy.component_added: whether the component has been added */
 		bool component_added;
-		/** @bo: object to store message to and from the GSC */
+		/** @proxy.bo: object to store messages to and from the GSC */
 		struct xe_bo *bo;
-		/** @to_gsc: map of the memory used to send messages to the GSC */
+		/** @proxy.to_gsc: map of the memory used to send messages to the GSC */
 		struct iosys_map to_gsc;
-		/** @from_gsc: map of the memory used to recv messages from the GSC */
+		/** @proxy.from_gsc: map of the memory used to recv messages from the GSC */
 		struct iosys_map from_gsc;
-		/** @to_csme: pointer to the memory used to send messages to CSME */
+		/** @proxy.to_csme: pointer to the memory used to send messages to CSME */
 		void *to_csme;
-		/** @from_csme: pointer to the memory used to recv messages from CSME */
+		/** @proxy.from_csme: pointer to the memory used to recv messages from CSME */
 		void *from_csme;
 	} proxy;
 };
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 047cde6cda10..3caaea2ff908 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -103,16 +103,16 @@ struct xe_gt {
 
 	/** @info: GT info */
 	struct {
-		/** @type: type of GT */
+		/** @info.type: type of GT */
 		enum xe_gt_type type;
-		/** @id: Unique ID of this GT within the PCI Device */
+		/** @info.id: Unique ID of this GT within the PCI Device */
 		u8 id;
-		/** @reference_clock: clock frequency */
+		/** @info.reference_clock: clock frequency */
 		u32 reference_clock;
-		/** @engine_mask: mask of engines present on GT */
+		/** @info.engine_mask: mask of engines present on GT */
 		u64 engine_mask;
 		/**
-		 * @__engine_mask: mask of engines present on GT read from
+		 * @info.__engine_mask: mask of engines present on GT read from
 		 * xe_pci.c, used to fake reading the engine_mask from the
 		 * hwconfig blob.
 		 */
@@ -125,14 +125,14 @@ struct xe_gt {
 	 * specific offset, as well as their own forcewake handling.
 	 */
 	struct {
-		/** @fw: force wake for GT */
+		/** @mmio.fw: force wake for GT */
 		struct xe_force_wake fw;
 		/**
-		 * @adj_limit: adjust MMIO address if address is below this
+		 * @mmio.adj_limit: adjust MMIO address if address is below this
 		 * value
 		 */
 		u32 adj_limit;
-		/** @adj_offset: offect to add to MMIO address when adjusting */
+		/** @mmio.adj_offset: offset to add to MMIO address when adjusting */
 		u32 adj_offset;
 	} mmio;
 
@@ -144,7 +144,7 @@ struct xe_gt {
 	/** @reset: state for GT resets */
 	struct {
 		/**
-		 * @worker: work so GT resets can done async allowing to reset
+		 * @reset.worker: work so GT resets can be done async, allowing the reset
 		 * code to safely flush all code paths
 		 */
 		struct work_struct worker;
@@ -152,36 +152,37 @@ struct xe_gt {
 
 	/** @tlb_invalidation: TLB invalidation state */
 	struct {
-		/** @seqno: TLB invalidation seqno, protected by CT lock */
+		/** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
 #define TLB_INVALIDATION_SEQNO_MAX	0x100000
 		int seqno;
 		/**
-		 * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
+		 * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
+		 * protected by CT lock
 		 */
 		int seqno_recv;
 		/**
-		 * @pending_fences: list of pending fences waiting TLB
+		 * @tlb_invalidation.pending_fences: list of pending fences waiting TLB
 		 * invaliations, protected by CT lock
 		 */
 		struct list_head pending_fences;
 		/**
-		 * @pending_lock: protects @pending_fences and updating
-		 * @seqno_recv.
+		 * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
+		 * and updating @tlb_invalidation.seqno_recv.
 		 */
 		spinlock_t pending_lock;
 		/**
-		 * @fence_tdr: schedules a delayed call to
+		 * @tlb_invalidation.fence_tdr: schedules a delayed call to
 		 * xe_gt_tlb_fence_timeout after the timeut interval is over.
 		 */
 		struct delayed_work fence_tdr;
-		/** @fence_context: context for TLB invalidation fences */
+		/** @tlb_invalidation.fence_context: context for TLB invalidation fences */
 		u64 fence_context;
 		/**
-		 * @fence_seqno: seqno to TLB invalidation fences, protected by
+		 * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
 		 * tlb_invalidation.lock
 		 */
 		u32 fence_seqno;
-		/** @lock: protects TLB invalidation fences */
+		/** @tlb_invalidation.lock: protects TLB invalidation fences */
 		spinlock_t lock;
 	} tlb_invalidation;
 
@@ -196,7 +197,7 @@ struct xe_gt {
 	/** @usm: unified shared memory state */
 	struct {
 		/**
-		 * @bb_pool: Pool from which batchbuffers, for USM operations
+		 * @usm.bb_pool: Pool from which batchbuffers, for USM operations
 		 * (e.g. migrations, fixing page tables), are allocated.
 		 * Dedicated pool needed so USM operations to not get blocked
 		 * behind any user operations which may have resulted in a
@@ -204,67 +205,67 @@ struct xe_gt {
 		 */
 		struct xe_sa_manager *bb_pool;
 		/**
-		 * @reserved_bcs_instance: reserved BCS instance used for USM
+		 * @usm.reserved_bcs_instance: reserved BCS instance used for USM
 		 * operations (e.g. mmigrations, fixing page tables)
 		 */
 		u16 reserved_bcs_instance;
-		/** @pf_wq: page fault work queue, unbound, high priority */
+		/** @usm.pf_wq: page fault work queue, unbound, high priority */
 		struct workqueue_struct *pf_wq;
-		/** @acc_wq: access counter work queue, unbound, high priority */
+		/** @usm.acc_wq: access counter work queue, unbound, high priority */
 		struct workqueue_struct *acc_wq;
 		/**
-		 * @pf_queue: Page fault queue used to sync faults so faults can
+		 * @usm.pf_queue: Page fault queue used to sync faults so faults can
 		 * be processed not under the GuC CT lock. The queue is sized so
 		 * it can sync all possible faults (1 per physical engine).
 		 * Multiple queues exists for page faults from different VMs are
 		 * be processed in parallel.
 		 */
 		struct pf_queue {
-			/** @gt: back pointer to GT */
+			/** @usm.pf_queue.gt: back pointer to GT */
 			struct xe_gt *gt;
 #define PF_QUEUE_NUM_DW	128
-			/** @data: data in the page fault queue */
+			/** @usm.pf_queue.data: data in the page fault queue */
 			u32 data[PF_QUEUE_NUM_DW];
 			/**
-			 * @tail: tail pointer in DWs for page fault queue,
+			 * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
 			 * moved by worker which processes faults (consumer).
 			 */
 			u16 tail;
 			/**
-			 * @head: head pointer in DWs for page fault queue,
+			 * @usm.pf_queue.head: head pointer in DWs for page fault queue,
 			 * moved by G2H handler (producer).
 			 */
 			u16 head;
-			/** @lock: protects page fault queue */
+			/** @usm.pf_queue.lock: protects page fault queue */
 			spinlock_t lock;
-			/** @worker: to process page faults */
+			/** @usm.pf_queue.worker: to process page faults */
 			struct work_struct worker;
 #define NUM_PF_QUEUE	4
 		} pf_queue[NUM_PF_QUEUE];
 		/**
-		 * @acc_queue: Same as page fault queue, cannot process access
+		 * @usm.acc_queue: Same as page fault queue, cannot process access
 		 * counters under CT lock.
 		 */
 		struct acc_queue {
-			/** @gt: back pointer to GT */
+			/** @usm.acc_queue.gt: back pointer to GT */
 			struct xe_gt *gt;
 #define ACC_QUEUE_NUM_DW	128
-			/** @data: data in the page fault queue */
+			/** @usm.acc_queue.data: data in the access counter queue */
 			u32 data[ACC_QUEUE_NUM_DW];
 			/**
-			 * @tail: tail pointer in DWs for access counter queue,
+			 * @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
 			 * moved by worker which processes counters
 			 * (consumer).
 			 */
 			u16 tail;
 			/**
-			 * @head: head pointer in DWs for access counter queue,
+			 * @usm.acc_queue.head: head pointer in DWs for access counter queue,
 			 * moved by G2H handler (producer).
 			 */
 			u16 head;
-			/** @lock: protects page fault queue */
+			/** @usm.acc_queue.lock: protects access counter queue */
 			spinlock_t lock;
-			/** @worker: to process access counters */
+			/** @usm.acc_queue.worker: to process access counters */
 			struct work_struct worker;
 #define NUM_ACC_QUEUE	4
 		} acc_queue[NUM_ACC_QUEUE];
@@ -301,7 +302,7 @@ struct xe_gt {
 
 	/** @pcode: GT's PCODE */
 	struct {
-		/** @lock: protecting GT's PCODE mailbox data */
+		/** @pcode.lock: protecting GT's PCODE mailbox data */
 		struct mutex lock;
 	} pcode;
 
@@ -313,32 +314,32 @@ struct xe_gt {
 
 	/** @mocs: info */
 	struct {
-		/** @uc_index: UC index */
+		/** @mocs.uc_index: UC index */
 		u8 uc_index;
-		/** @wb_index: WB index, only used on L3_CCS platforms */
+		/** @mocs.wb_index: WB index, only used on L3_CCS platforms */
 		u8 wb_index;
 	} mocs;
 
 	/** @fuse_topo: GT topology reported by fuse registers */
 	struct {
-		/** @g_dss_mask: dual-subslices usable by geometry */
+		/** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */
 		xe_dss_mask_t g_dss_mask;
 
-		/** @c_dss_mask: dual-subslices usable by compute */
+		/** @fuse_topo.c_dss_mask: dual-subslices usable by compute */
 		xe_dss_mask_t c_dss_mask;
 
-		/** @eu_mask_per_dss: EU mask per DSS*/
+		/** @fuse_topo.eu_mask_per_dss: EU mask per DSS */
 		xe_eu_mask_t eu_mask_per_dss;
 	} fuse_topo;
 
 	/** @steering: register steering for individual HW units */
 	struct {
-		/* @ranges: register ranges used for this steering type */
+		/** @steering.ranges: register ranges used for this steering type */
 		const struct xe_mmio_range *ranges;
 
-		/** @group_target: target to steer accesses to */
+		/** @steering.group_target: target to steer accesses to */
 		u16 group_target;
-		/** @instance_target: instance to steer accesses to */
+		/** @steering.instance_target: instance to steer accesses to */
 		u16 instance_target;
 	} steering[NUM_STEERING_TYPES];
 
@@ -350,13 +351,13 @@ struct xe_gt {
 
 	/** @wa_active: keep track of active workarounds */
 	struct {
-		/** @gt: bitmap with active GT workarounds */
+		/** @wa_active.gt: bitmap with active GT workarounds */
 		unsigned long *gt;
-		/** @engine: bitmap with active engine workarounds */
+		/** @wa_active.engine: bitmap with active engine workarounds */
 		unsigned long *engine;
-		/** @lrc: bitmap with active LRC workarounds */
+		/** @wa_active.lrc: bitmap with active LRC workarounds */
 		unsigned long *lrc;
-		/** @oob: bitmap with active OOB workaroudns */
+		/** @wa_active.oob: bitmap with active OOB workarounds */
 		unsigned long *oob;
 	} wa_active;
 };
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index d814d4ee3fc6..68c871052341 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -87,9 +87,9 @@ struct xe_guc_ct {
 	spinlock_t fast_lock;
 	/** @ctbs: buffers for sending and receiving commands */
 	struct {
-		/** @send: Host to GuC (H2G, send) channel */
+		/** @ctbs.h2g: Host to GuC (H2G, send) channel */
 		struct guc_ctb h2g;
-		/** @recv: GuC to Host (G2H, receive) channel */
+		/** @ctbs.g2h: GuC to Host (G2H, receive) channel */
 		struct guc_ctb g2h;
 	} ctbs;
 	/** @g2h_outstanding: number of outstanding G2H */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
index 649b0a852692..72fc0f42b0a5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -102,9 +102,9 @@ struct xe_guc_submit_exec_queue_snapshot {
 
 	/** @sched_props: scheduling properties */
 	struct {
-		/** @timeslice_us: timeslice period in micro-seconds */
+		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
 		u32 timeslice_us;
-		/** @preempt_timeout_us: preemption timeout in micro-seconds */
+		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
 		u32 preempt_timeout_us;
 	} sched_props;
 
@@ -118,11 +118,11 @@ struct xe_guc_submit_exec_queue_snapshot {
 
 	/** @guc: GuC Engine Snapshot */
 	struct {
-		/** @wqi_head: work queue item head */
+		/** @guc.wqi_head: work queue item head */
 		u32 wqi_head;
-		/** @wqi_tail: work queue item tail */
+		/** @guc.wqi_tail: work queue item tail */
 		u32 wqi_tail;
-		/** @id: GuC id for this exec_queue */
+		/** @guc.id: GuC id for this exec_queue */
 		u16 id;
 	} guc;
 
@@ -133,13 +133,13 @@ struct xe_guc_submit_exec_queue_snapshot {
 	bool parallel_execution;
 	/** @parallel: snapshot of the useful parallel scratch */
 	struct {
-		/** @wq_desc: Workqueue description */
+		/** @parallel.wq_desc: Workqueue description */
 		struct {
-			/** @head: Workqueue Head */
+			/** @parallel.wq_desc.head: Workqueue Head */
 			u32 head;
-			/** @tail: Workqueue Tail */
+			/** @parallel.wq_desc.tail: Workqueue Tail */
 			u32 tail;
-			/** @status: Workqueue Status */
+			/** @parallel.wq_desc.status: Workqueue Status */
 			u32 status;
 		} wq_desc;
 		/** @wq: Workqueue Items */
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index dc6059de669c..edcd1a950bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -49,40 +49,40 @@ struct xe_guc {
 	struct xe_guc_db_mgr dbm;
 	/** @submission_state: GuC submission state */
 	struct {
-		/** @exec_queue_lookup: Lookup an xe_engine from guc_id */
+		/** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
 		struct xarray exec_queue_lookup;
-		/** @guc_ids: used to allocate new guc_ids, single-lrc */
+		/** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
 		struct ida guc_ids;
-		/** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
+		/** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
 		unsigned long *guc_ids_bitmap;
-		/** @stopped: submissions are stopped */
+		/** @submission_state.stopped: submissions are stopped */
 		atomic_t stopped;
-		/** @lock: protects submission state */
+		/** @submission_state.lock: protects submission state */
 		struct mutex lock;
-		/** @suspend: suspend fence state */
+		/** @submission_state.suspend: suspend fence state */
 		struct {
-			/** @lock: suspend fences lock */
+			/** @submission_state.suspend.lock: suspend fences lock */
 			spinlock_t lock;
-			/** @context: suspend fences context */
+			/** @submission_state.suspend.context: suspend fences context */
 			u64 context;
-			/** @seqno: suspend fences seqno */
+			/** @submission_state.suspend.seqno: suspend fences seqno */
 			u32 seqno;
 		} suspend;
 #ifdef CONFIG_PROVE_LOCKING
 #define NUM_SUBMIT_WQ	256
-		/** @submit_wq_pool: submission ordered workqueues pool */
+		/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
 		struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-		/** @submit_wq_idx: submission ordered workqueue index */
+		/** @submission_state.submit_wq_idx: submission ordered workqueue index */
 		int submit_wq_idx;
 #endif
-		/** @enabled: submission is enabled */
+		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
 	} submission_state;
 	/** @hwconfig: Hardware config state */
 	struct {
-		/** @bo: buffer object of the hardware config */
+		/** @hwconfig.bo: buffer object of the hardware config */
 		struct xe_bo *bo;
-		/** @size: size of the hardware config */
+		/** @hwconfig.size: size of the hardware config */
 		u32 size;
 	} hwconfig;
 
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index dfeaaac08b7f..61d9027e4665 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -79,23 +79,23 @@ struct xe_hw_engine_class_intf {
 	 * @defaults: default scheduling properties
 	 */
 	struct {
-		/** @set_job_timeout: Set job timeout in ms for engine */
+		/** @sched_props.job_timeout_ms: job timeout in ms for engine */
 		u32 job_timeout_ms;
-		/** @job_timeout_min: Min job timeout in ms for engine */
+		/** @sched_props.job_timeout_min: Min job timeout in ms for engine */
 		u32 job_timeout_min;
-		/** @job_timeout_max: Max job timeout in ms for engine */
+		/** @sched_props.job_timeout_max: Max job timeout in ms for engine */
 		u32 job_timeout_max;
-		/** @timeslice_us: timeslice period in micro-seconds */
+		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
 		u32 timeslice_us;
-		/** @timeslice_min: min timeslice period in micro-seconds */
+		/** @sched_props.timeslice_min: min timeslice period in micro-seconds */
 		u32 timeslice_min;
-		/** @timeslice_max: max timeslice period in micro-seconds */
+		/** @sched_props.timeslice_max: max timeslice period in micro-seconds */
 		u32 timeslice_max;
-		/** @preempt_timeout_us: preemption timeout in micro-seconds */
+		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
 		u32 preempt_timeout_us;
-		/** @preempt_timeout_min: min preemption timeout in micro-seconds */
+		/** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */
 		u32 preempt_timeout_min;
-		/** @preempt_timeout_max: max preemption timeout in micro-seconds */
+		/** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */
 		u32 preempt_timeout_max;
 	} sched_props, defaults;
 };
@@ -164,62 +164,62 @@ struct xe_hw_engine_snapshot {
 	u16 logical_instance;
 	/** @forcewake: Force Wake information snapshot */
 	struct {
-		/** @domain: force wake domain of this hw engine */
+		/** @forcewake.domain: force wake domain of this hw engine */
 		enum xe_force_wake_domains domain;
-		/** @ref: Forcewake ref for the above domain */
+		/** @forcewake.ref: Forcewake ref for the above domain */
 		int ref;
 	} forcewake;
 	/** @mmio_base: MMIO base address of this hw engine*/
 	u32 mmio_base;
 	/** @reg: Useful MMIO register snapshot */
 	struct {
-		/** @ring_hwstam: RING_HWSTAM */
+		/** @reg.ring_hwstam: RING_HWSTAM */
 		u32 ring_hwstam;
-		/** @ring_hws_pga: RING_HWS_PGA */
+		/** @reg.ring_hws_pga: RING_HWS_PGA */
 		u32 ring_hws_pga;
-		/** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
+		/** @reg.ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
 		u32 ring_execlist_status_lo;
-		/** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
+		/** @reg.ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
 		u32 ring_execlist_status_hi;
-		/** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
+		/** @reg.ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
 		u32 ring_execlist_sq_contents_lo;
-		/** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
+		/** @reg.ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
 		u32 ring_execlist_sq_contents_hi;
-		/** @ring_start: RING_START */
+		/** @reg.ring_start: RING_START */
 		u32 ring_start;
-		/** @ring_head: RING_HEAD */
+		/** @reg.ring_head: RING_HEAD */
 		u32 ring_head;
-		/** @ring_tail: RING_TAIL */
+		/** @reg.ring_tail: RING_TAIL */
 		u32 ring_tail;
-		/** @ring_ctl: RING_CTL */
+		/** @reg.ring_ctl: RING_CTL */
 		u32 ring_ctl;
-		/** @ring_mi_mode: RING_MI_MODE */
+		/** @reg.ring_mi_mode: RING_MI_MODE */
 		u32 ring_mi_mode;
-		/** @ring_mode: RING_MODE */
+		/** @reg.ring_mode: RING_MODE */
 		u32 ring_mode;
-		/** @ring_imr: RING_IMR */
+		/** @reg.ring_imr: RING_IMR */
 		u32 ring_imr;
-		/** @ring_esr: RING_ESR */
+		/** @reg.ring_esr: RING_ESR */
 		u32 ring_esr;
-		/** @ring_emr: RING_EMR */
+		/** @reg.ring_emr: RING_EMR */
 		u32 ring_emr;
-		/** @ring_eir: RING_EIR */
+		/** @reg.ring_eir: RING_EIR */
 		u32 ring_eir;
-		/** @ring_acthd_udw: RING_ACTHD_UDW */
+		/** @reg.ring_acthd_udw: RING_ACTHD_UDW */
 		u32 ring_acthd_udw;
-		/** @ring_acthd: RING_ACTHD */
+		/** @reg.ring_acthd: RING_ACTHD */
 		u32 ring_acthd;
-		/** @ring_bbaddr_udw: RING_BBADDR_UDW */
+		/** @reg.ring_bbaddr_udw: RING_BBADDR_UDW */
 		u32 ring_bbaddr_udw;
-		/** @ring_bbaddr: RING_BBADDR */
+		/** @reg.ring_bbaddr: RING_BBADDR */
 		u32 ring_bbaddr;
-		/** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */
+		/** @reg.ring_dma_fadd_udw: RING_DMA_FADD_UDW */
 		u32 ring_dma_fadd_udw;
-		/** @ring_dma_fadd: RING_DMA_FADD */
+		/** @reg.ring_dma_fadd: RING_DMA_FADD */
 		u32 ring_dma_fadd;
-		/** @ipehr: IPEHR */
+		/** @reg.ipehr: IPEHR */
 		u32 ipehr;
-		/** @rcu_mode: RCU_MODE */
+		/** @reg.rcu_mode: RCU_MODE */
 		u32 rcu_mode;
 	} reg;
 };
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 78220336062c..24f20ed66fd1 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -28,11 +28,11 @@ struct xe_lrc {
 
 	/** @ring: submission ring state */
 	struct {
-		/** @size: size of submission ring */
+		/** @ring.size: size of submission ring */
 		u32 size;
-		/** @tail: tail of submission ring */
+		/** @ring.tail: tail of submission ring */
 		u32 tail;
-		/** @old_tail: shadow of tail */
+		/** @ring.old_tail: shadow of tail */
 		u32 old_tail;
 	} ring;
 
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 71213ba9735b..8778c34d6620 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -30,11 +30,11 @@ struct xe_sched_job {
 	struct dma_fence *fence;
 	/** @user_fence: write back value when BB is complete */
 	struct {
-		/** @used: user fence is used */
+		/** @user_fence.used: user fence is used */
 		bool used;
-		/** @addr: address to write to */
+		/** @user_fence.addr: address to write to */
 		u64 addr;
-		/** @value: write back value */
+		/** @user_fence.value: write back value */
 		u64 value;
 	} user_fence;
 	/** @migrate_flush_flags: Additional flush flags for migration jobs */
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ee914a5d8523..bc800b696866 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -124,11 +124,14 @@ struct xe_uc_fw {
 
 	/** @versions: FW versions wanted and found */
 	struct {
-		/** @wanted: firmware version wanted by platform */
+		/** @versions.wanted: firmware version wanted by platform */
 		struct xe_uc_fw_version wanted;
-		/** @wanted_type: type of firmware version wanted (release vs compatibility) */
+		/**
+		 * @versions.wanted_type: type of firmware version wanted
+		 * (release vs compatibility)
+		 */
 		enum xe_uc_fw_version_types wanted_type;
-		/** @found: fw versions found in firmware blob */
+		/** @versions.found: fw versions found in firmware blob */
 		struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT];
 	} versions;
 
diff --git a/drivers/gpu/drm/xe/xe_wopcm_types.h b/drivers/gpu/drm/xe/xe_wopcm_types.h
index 486d850c4084..99d34837c408 100644
--- a/drivers/gpu/drm/xe/xe_wopcm_types.h
+++ b/drivers/gpu/drm/xe/xe_wopcm_types.h
@@ -16,9 +16,9 @@ struct xe_wopcm {
 	u32 size;
 	/** @guc: GuC WOPCM Region info */
 	struct {
-		/** @base: GuC WOPCM base which is offset from WOPCM base */
+		/** @guc.base: GuC WOPCM base which is offset from WOPCM base */
 		u32 base;
-		/** @size: Size of the GuC WOPCM region */
+		/** @guc.size: Size of the GuC WOPCM region */
 		u32 size;
 	} guc;
 };
-- 
2.43.0


