[PATCH v7 2/2] drm/xe: restore xe_mmio_ioctl as the ioctl handler of the mmio debugfs file

Ofir Bitton obitton at habana.ai
Sun Apr 14 17:13:31 UTC 2024


On 14/04/2024 11:11, Koby Elbaz wrote:
> The drm mmio ioctl handler (xe_mmio_ioctl) was recently removed since it
> was no longer needed.
> It is now restored for debugging purposes, to serve as the ioctl handler
> of the 'mmio' debugfs file.
> 
> Notes:
> 1. The limited mmio access for non-admin users (aka the whitelist) was
> removed, as it is no longer relevant: a user must have root permission
> to use debugfs anyway.
> 2. To support multi-tile access, the tile index is now dynamic and is
> derived from the given mmio address itself.
> 
> Cc: Matt Roper <matthew.d.roper at intel.com>
> Cc: Daniel Vetter <daniel at ffwll.ch>
> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
> Cc: Ofir Bitton <obitton at habana.ai>
> Cc: José Roberto de Souza <jose.souza at intel.com>
> Cc: Michal Winiarski <michal.winiarski at intel.com>
> Signed-off-by: Koby Elbaz <kelbaz at habana.ai>
> ---
> Changes in v7:
>   - Get/put forcewake will only be done for enabled/available GTs.
>   - Added Multi-tile/GT MMIO access support.
>   - Patchset was rebased on top of drm-tip, rather than drm-xe-next.
> 
>   drivers/gpu/drm/xe/xe_debugfs.c      | 160 ++++++++++++++++++++++++++-
>   drivers/gpu/drm/xe/xe_debugfs_mmio.h |  43 +++++++
>   2 files changed, 201 insertions(+), 2 deletions(-)
>   create mode 100644 drivers/gpu/drm/xe/xe_debugfs_mmio.h
> 
> diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
> index ce27516ffac0..4ad728b937fd 100644
> --- a/drivers/gpu/drm/xe/xe_debugfs.c
> +++ b/drivers/gpu/drm/xe/xe_debugfs.c
> @@ -6,7 +6,7 @@
>   #include "xe_debugfs.h"
>   
>   #include <linux/string_helpers.h>
> -
> +#include <linux/security.h>
>   #include <drm/drm_debugfs.h>
>   
>   #include "xe_bo.h"
> @@ -15,6 +15,8 @@
>   #include "xe_pm.h"
>   #include "xe_sriov.h"
>   #include "xe_step.h"
> +#include "xe_debugfs_mmio.h"
> +#include "xe_mmio.h"
>   
>   #ifdef CONFIG_DRM_XE_DEBUG
>   #include "xe_bo_evict.h"
> @@ -111,8 +113,162 @@ static int forcewake_release(struct inode *inode, struct file *file)
>   	return 0;
>   }
>   
> -static long xe_debugfs_mmio_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
> +static int get_tile_id_of_mmio_addr(struct xe_device *xe, u32 mmio_addr, u8 *tile_id)
> +{
> +	/* MMIO space size is identical for all tiles so arbitrarily choose root tile */
> +	struct xe_tile *tile = xe_device_get_root_tile(xe);
> +	u32 tile_mmio_base_addr = 0;
> +	u8 id;
> +
> +	if (mmio_addr >= (tile->mmio.size * xe->info.tile_count))
> +		return -EINVAL;
> +
> +	for_each_tile(tile, xe, id) {
> +		tile_mmio_base_addr = id * tile->mmio.size;
> +		/* Each tile has a 16M MMIO space, but the contained CFG space is only 4M */
> +		if ((mmio_addr >= tile_mmio_base_addr) &&
> +				(mmio_addr < (tile_mmio_base_addr + SZ_4M))) {
> +			*tile_id = id;
> +			return 0;
> +		}
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +static int assign_mmio_args_to_xe_reg(struct xe_device *xe, struct xe_debugfs_mmio_ioctl_data *args,
> +				      struct xe_reg *reg, u8 *tile_id)
>   {
> +	unsigned int mmio_bits_flag, bytes;
> +	u32 mmio_offset_in_tile;
> +	struct xe_gt *gt;
> +	u8 local_tile_id;
> +	int ret;
> +
> +	mmio_bits_flag = args->flags & XE_DEBUGFS_MMIO_BITS_MASK;
> +	bytes = 1 << mmio_bits_flag;
> +
> +	ret = get_tile_id_of_mmio_addr(xe, args->addr, &local_tile_id);
> +	if (ret)
> +		return ret;
> +
> +	gt = xe_device_get_gt(xe, local_tile_id);
> +	mmio_offset_in_tile = args->addr - local_tile_id * SZ_16M;
> +
> +	/*
> +	 * TODO: migrate to xe_gt_mcr to lookup the mmio range and handle
> +	 * multicast registers. Steering would need uapi extension.
> +	 */
> +	*reg = XE_REG(mmio_offset_in_tile);
> +
> +	if (XE_IOCTL_DBG(xe, reg->addr + bytes > gt_to_tile(gt)->mmio.size))
> +		return -EINVAL;
> +
> +	*tile_id = local_tile_id;
> +
> +	return 0;
> +}
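
To spell out the decode done by the two helpers above for archive readers
(the numbers are mine, not from the patch, and assume tile->mmio.size is
SZ_16M):

	/*
	 * Illustrative only: an ioctl addr of 0x1004000 decodes as
	 *
	 *   tile 1 base = 1 * SZ_16M = 0x1000000
	 *   0x1000000 <= 0x1004000 < 0x1000000 + SZ_4M  ->  tile_id = 1
	 *   mmio_offset_in_tile = 0x1004000 - 1 * SZ_16M = 0x4000
	 *
	 * i.e. register offset 0x4000 on tile 1, inside the 4M CFG window.
	 */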
> +
> +static int _xe_debugfs_mmio_ioctl(struct xe_device *xe, struct xe_debugfs_mmio_ioctl_data *args)
> +{
> +	struct xe_reg reg;
> +	struct xe_gt *gt;
> +	u8 tile_id, id;
> +	int ret;
> +
> +	if (XE_IOCTL_DBG(xe, args->extensions) ||
> +	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, args->flags & ~XE_DEBUGFS_MMIO_VALID_FLAGS))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, !(args->flags & XE_DEBUGFS_MMIO_WRITE) && args->value))
> +		return -EINVAL;
> +
> +	xe_device_mem_access_get(xe);
> +	for_each_gt(gt, xe, id)
> +		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
> +
> +	ret = assign_mmio_args_to_xe_reg(xe, args, &reg, &tile_id);
> +	if (ret)
> +		goto exit;
> +
> +	gt = xe_device_get_gt(xe, tile_id);
> +
> +	if (args->flags & XE_DEBUGFS_MMIO_WRITE) {
> +		switch (args->flags & XE_DEBUGFS_MMIO_BITS_MASK) {
> +		case XE_DEBUGFS_MMIO_32BIT:
> +			if (XE_IOCTL_DBG(xe, args->value > U32_MAX)) {
> +				ret = -EINVAL;
> +				goto exit;
> +			}
> +			xe_mmio_write32(gt, reg, args->value);
> +			break;
> +		default:
> +			drm_dbg(&xe->drm, "Invalid MMIO bit size");
> +			fallthrough;
> +		case XE_DEBUGFS_MMIO_8BIT: /* TODO */
> +		case XE_DEBUGFS_MMIO_16BIT: /* TODO */
> +			ret = -EOPNOTSUPP;
> +			goto exit;
> +		}
> +	}
> +
> +	if (args->flags & XE_DEBUGFS_MMIO_READ) {
> +		switch (args->flags & XE_DEBUGFS_MMIO_BITS_MASK) {
> +		case XE_DEBUGFS_MMIO_32BIT:
> +			args->value = xe_mmio_read32(gt, reg);
> +			break;
> +		case XE_DEBUGFS_MMIO_64BIT:
> +			args->value = xe_mmio_read64_2x32(gt, reg);
> +			break;
> +		default:
> +			drm_dbg(&xe->drm, "Invalid MMIO bit size");
> +			fallthrough;
> +		case XE_DEBUGFS_MMIO_8BIT: /* TODO */
> +		case XE_DEBUGFS_MMIO_16BIT: /* TODO */
> +			ret = -EOPNOTSUPP;
> +		}
> +	}
> +
> +exit:
> +	for_each_gt(gt, xe, id)
> +		XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
> +
> +	xe_device_mem_access_put(xe);
> +
> +	return ret;
> +}
> +
> +static long xe_debugfs_mmio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
> +{
> +	struct xe_device *xe = file_inode(filp)->i_private;
> +	struct xe_debugfs_mmio_ioctl_data mmio_data;
> +	int ret;
> +
> +	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
> +	if (ret)
> +		return ret;
> +
> +	if (cmd == XE_DEBUGFS_MMIO_IOCTL) {
> +		ret = copy_from_user(&mmio_data, (struct xe_debugfs_mmio_ioctl_data __user *)arg,
> +				     sizeof(mmio_data));
> +		if (ret)
> +			return -EFAULT;
> +
> +		ret = _xe_debugfs_mmio_ioctl(xe, &mmio_data);
> +		if (ret)
> +			return ret;
> +
> +		ret = copy_to_user((struct xe_debugfs_mmio_ioctl_data __user *)arg,
> +				   &mmio_data, sizeof(mmio_data));
> +		if (ret)
> +			return -EFAULT;
> +	} else {
> +		return -EINVAL;
> +	}
> +
>   	return 0;
>   }
>   
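The file_operations / debugfs_create_file() wiring that attaches this
handler to the 'mmio' node is not part of this hunk, so purely as a sketch
of how I read it (the fops name and the parent dentry below are my own
placeholders, not from the series):

	static const struct file_operations mmio_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = xe_debugfs_mmio_ioctl,
	};

	/* xe lands in i_private, matching file_inode(filp)->i_private above */
	debugfs_create_file("mmio", 0600, root /* assumed parent dentry */,
			    xe, &mmio_fops);
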
> diff --git a/drivers/gpu/drm/xe/xe_debugfs_mmio.h b/drivers/gpu/drm/xe/xe_debugfs_mmio.h
> new file mode 100644
> index 000000000000..1dfb8fa2ce9a
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_debugfs_mmio.h
> @@ -0,0 +1,43 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2023 Intel Corporation
> + */
> +
> +#ifndef _XE_DEBUGFS_MMIO_H_
> +#define _XE_DEBUGFS_MMIO_H_
> +
> +#include <drm/drm.h>
> +
> +#define XE_DEBUGFS_MMIO_8BIT		0x0
> +#define XE_DEBUGFS_MMIO_16BIT		0x1
> +#define XE_DEBUGFS_MMIO_32BIT		0x2
> +#define XE_DEBUGFS_MMIO_64BIT		0x3
> +#define XE_DEBUGFS_MMIO_BITS_MASK	0x3
> +#define XE_DEBUGFS_MMIO_READ		0x4
> +#define XE_DEBUGFS_MMIO_WRITE		0x8
> +
> +#define XE_DEBUGFS_MMIO_VALID_FLAGS (\
> +	XE_DEBUGFS_MMIO_BITS_MASK |\
> +	XE_DEBUGFS_MMIO_READ |\
> +	XE_DEBUGFS_MMIO_WRITE)
> +
> +struct xe_debugfs_mmio_ioctl_data {
> +	/** @extensions: Pointer to the first extension struct, if any */
> +	__u64 extensions;
> +
> +	__u32 addr;
> +
> +	__u32 flags;
> +
> +	__u64 value;
> +
> +	/** @reserved: Reserved */
> +	__u64 reserved[2];
> +};
> +
> +#define XE_DEBUGFS_MMIO_IOCTL_NR	0x00
> +
> +#define XE_DEBUGFS_MMIO_IOCTL \
> +	_IOWR(0x20, XE_DEBUGFS_MMIO_IOCTL_NR, struct xe_debugfs_mmio_ioctl_data)
> +
> +#endif /* _XE_DEBUGFS_MMIO_H_ */
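
For anyone exercising this from userspace, a minimal sketch of the intended
usage as I understand it (the debugfs path is an assumption, wherever the
series ends up creating the node, and the register offset is an arbitrary
example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "xe_debugfs_mmio.h"

	int main(void)
	{
		struct xe_debugfs_mmio_ioctl_data data;
		int fd, ret;

		fd = open("/sys/kernel/debug/dri/0/mmio", O_RDWR); /* assumed path */
		if (fd < 0)
			return 1;

		memset(&data, 0, sizeof(data));
		data.addr = 0x4000;	/* arbitrary example offset on tile 0 */
		data.flags = XE_DEBUGFS_MMIO_READ | XE_DEBUGFS_MMIO_32BIT;

		ret = ioctl(fd, XE_DEBUGFS_MMIO_IOCTL, &data);
		if (!ret)
			printf("0x%x = 0x%llx\n", data.addr,
			       (unsigned long long)data.value);

		close(fd);
		return ret;
	}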

Reviewed-by: Ofir Bitton <obitton at habana.ai>

