[PATCH 3/3] drm/amd/amdkfd: Surface files in Sysfs to allow users to get number of compute units that are in use.
Ramesh Errabolu
Ramesh.Errabolu at amd.com
Thu Sep 17 17:11:18 UTC 2020
[Why]
Allow users to know how many compute units (CUs) are in use at any given
moment.
[How]
Surface files in Sysfs that allow users to determine the number of compute
units in use for a given process. One Sysfs file is used per device.
Signed-off-by: Ramesh Errabolu <Ramesh.Errabolu at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 28 ++++++-
drivers/gpu/drm/amd/amdkfd/kfd_process.c | 101 +++++++++++++++++++----
2 files changed, 114 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 023629f28495..2ce03586d0dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -631,7 +631,7 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
-#define MAX_SYSFS_FILENAME_LEN 11
+#define MAX_SYSFS_FILENAME_LEN 32
/*
* SDMA counter runs at 100MHz frequency.
@@ -692,6 +692,32 @@ struct kfd_process_device {
uint64_t sdma_past_activity_counter;
struct attribute attr_sdma;
char sdma_filename[MAX_SYSFS_FILENAME_LEN];
+
+ /*
+ * @cu_occupancy: Reports occupancy of Compute Units (CUs) of a process
+ * on the device encoded by "this" struct instance. The value reflects
+ * CU usage by all of the waves launched by this process on this
+ * device. An important property of the occupancy parameter is that
+ * its value is a snapshot of current use.
+ *
+ * The following should be noted regarding how this parameter is reported:
+ *
+ * The number of waves that a CU can launch is limited by a couple of
+ * parameters. These are encoded by the struct amdgpu_cu_info instance
+ * that is part of every device definition. For GFX9 devices this
+ * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
+ * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
+ * when they do. This could change for future devices, so this example
+ * should be treated as a guide only.
+ *
+ * All CUs of a device are assumed to be available to the process. This
+ * may not be true under certain conditions, e.g. CU masking.
+ *
+ * Finally, the number of CUs occupied by a process depends both on the
+ * number of CUs the device has and on the number of competing processes.
+ */
+ struct attribute attr_cu_occupancy;
+ char cu_occupancy_filename[MAX_SYSFS_FILENAME_LEN];
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
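As a standalone illustration of the translation documented above: with the
typical GFX9 limits of 4 SIMDs per CU and 10 waves per SIMD (which give the
40 waves per CU noted in the comment), the occupancy count is simply the
in-flight wave count divided by that per-CU limit. A minimal sketch, assuming
those values:

/* Illustration only: translate an in-flight wave count into occupied CUs,
 * mirroring the calculation performed by kfd_get_cu_occupancy() below.
 * The per-CU limits are assumed GFX9 values (4 SIMDs/CU, 10 waves/SIMD).
 */
static int example_waves_to_cus(int wave_cnt)
{
	int simd_per_cu = 4;
	int max_waves_per_simd = 10;

	/* e.g. 200 in-flight waves / 40 waves per CU -> 5 CUs reported */
	return wave_cnt / (simd_per_cu * max_waves_per_simd);
}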
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a0e12a79ab7d..3baffbc828b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -249,6 +249,63 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
}
}
+/**
+ * kfd_get_cu_occupancy() - Collect the number of waves in flight on this device
+ * by the current process, and translate the acquired wave count into the
+ * number of compute units that are occupied.
+ *
+ * @attr: Handle of the attribute that allows reporting of the wave count. The
+ * attribute handle encapsulates the GPU device it is associated with, thereby
+ * allowing collection of waves in flight, etc.
+ *
+ * @buffer: Handle of the user-provided buffer updated with the wave count
+ *
+ * Return: Number of bytes written to user buffer or an error value
+ */
+static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+{
+ int cu_cnt;
+ int wave_cnt;
+ int max_waves_per_cu;
+ struct kfd_dev *dev = NULL;
+ struct kfd_process *proc = NULL;
+ struct amdgpu_device *adev = NULL;
+ struct kfd_process_device *pdd = NULL;
+
+ /* Acquire handle of Process-Device-Data associated with attribute */
+ pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+
+ /*
+ * Acquire the handle of the GPU device associated with the attribute and
+ * determine whether collecting CU occupancy is supported for it. If so,
+ * acquire the handle of the process, which encapsulates the PASID of the
+ * process. It is possible that the process has no work on the device;
+ * this is determined by checking whether the process has any queues.
+ * @note: The ability to collect CU occupancy is defined only for GFX9 devices
+ */
+ cu_cnt = 0;
+ dev = pdd->dev;
+ proc = pdd->process;
+ if (pdd->qpd.queue_count == 0) {
+ pr_info("%s: Gpu-Id: %d has no active queues for process %d\n",
+ __func__, dev->id, proc->pasid);
+ return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+ }
+
+ /* Collect wave count from device by reading relevant registers */
+ wave_cnt = 0;
+ if (dev->kfd2kgd->get_cu_occupancy != NULL)
+ dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid,
+ &wave_cnt);
+
+ /* Translate wave count to number of compute units */
+ adev = (struct amdgpu_device *)dev->kgd;
+ max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+ adev->gfx.cu_info.max_waves_per_simd;
+ cu_cnt = wave_cnt / max_waves_per_cu;
+ return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+}
+
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
@@ -279,6 +336,10 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(sdma_activity_work_handler.sdma_activity_counter)/
SDMA_ACTIVITY_DIVISOR);
+
+ /* Sysfs handle that gets CU occupancy is per device */
+ } else if (strncmp(attr->name, "cu_occupancy_", 13) == 0) {
+ return kfd_get_cu_occupancy(attr, buffer);
} else {
pr_err("Invalid attribute");
return -EINVAL;
@@ -432,27 +493,37 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
* Create sysfs files for each GPU:
* - proc/<pid>/vram_<gpuid>
* - proc/<pid>/sdma_<gpuid>
+ * - proc/<pid>/cu_occupancy_<gpuid>
*/
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
+ ret = kfd_sysfs_create_file(p, &pdd->attr_vram,
+ pdd->vram_filename);
if (ret)
pr_warn("Creating vram usage for gpu id %d failed",
(int)pdd->dev->id);

snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
+ ret = kfd_sysfs_create_file(p, &pdd->attr_sdma,
+ pdd->sdma_filename);
if (ret)
pr_warn("Creating sdma usage for gpu id %d failed",
(int)pdd->dev->id);
+
+ /* CU occupancy can be collected only on devices that support it */
+ if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
+ snprintf(pdd->cu_occupancy_filename,
+ MAX_SYSFS_FILENAME_LEN, "cu_occupancy_%u",
+ pdd->dev->id);
+ ret = kfd_sysfs_create_file(p, &pdd->attr_cu_occupancy,
+ pdd->cu_occupancy_filename);
+ if (ret)
+ pr_warn("Creating CU occupancy file for gpu id %d failed",
+ (int)pdd->dev->id);
+ }
}
return ret;
}
-
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@@ -767,8 +838,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
amdgpu_amdkfd_gpuvm_release_process_vm(
pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
- }
- else if (pdd->vm)
+ } else if (pdd->vm)
amdgpu_amdkfd_gpuvm_destroy_process_vm(
pdd->dev->kgd, pdd->vm);
@@ -815,7 +885,10 @@ static void kfd_process_wq_release(struct work_struct *work)
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
sysfs_remove_file(p->kobj, &pdd->attr_vram);
sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+ if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
+ sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
}
kobject_del(p->kobj);
--
2.27.0