[PATCH 1/3] drm/xe/pf: Use GuC Buffer Cache during policy provisioning
Michal Wajdeczko
michal.wajdeczko at intel.com
Fri Jan 24 18:42:07 UTC 2025
On 22.01.2025 23:29, Cavitt, Jonathan wrote:
> -----Original Message-----
> From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Michal Wajdeczko
> Sent: Wednesday, January 22, 2025 1:50 PM
> To: intel-xe at lists.freedesktop.org
> Cc: Wajdeczko, Michal <Michal.Wajdeczko at intel.com>; Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Subject: [PATCH 1/3] drm/xe/pf: Use GuC Buffer Cache during policy provisioning
>>
>> Start using the GuC buffer cache for the SRIOV policy configuration
>> actions. This is a required step before we can declare the SRIOV
>> PF as reclaim safe.
>>
>> Signed-off-by: Michal Wajdeczko <michal.wajdeczko at intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>
> LGTM, though I have to wonder if the pf_send_policy_klvs helper function
> is particularly necessary now that it's been mostly gutted.
it handles the conversion from the "gt/buf" layer down to the lower "guc/addr" layer
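i.e. it is the single spot where a struct xe_guc_buf gets turned into the raw
GGTT address that the H2G action expects (just a sketch, using the names from
this patch):

	/* gt/buf layer: callers only deal with struct xe_guc_buf */
	static int pf_send_policy_klvs(struct xe_gt *gt, struct xe_guc_buf buf,
				       u32 num_dwords)
	{
		struct xe_guc *guc = &gt->uc.guc;

		/* guc/addr layer: flush the CPU copy and pass the address */
		return guc_action_update_vgt_policy(guc, xe_guc_buf_flush(buf),
						    num_dwords);
	}

so the callers above never have to look at GGTT addresses directly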
> Reviewed-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
> -Jonathan Cavitt
thanks!
btw, I plan to merge this patch separately from this series
Michal
>
>> ---
>> drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c | 50 +++++++++++-----------
>> 1 file changed, 24 insertions(+), 26 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
>> index c00fb354705f..4445f660e6d1 100644
>> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
>> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
>> @@ -10,6 +10,7 @@
>> #include "xe_gt_sriov_pf_helpers.h"
>> #include "xe_gt_sriov_pf_policy.h"
>> #include "xe_gt_sriov_printk.h"
>> +#include "xe_guc_buf.h"
>> #include "xe_guc_ct.h"
>> #include "xe_guc_klv_helpers.h"
>> #include "xe_pm.h"
>> @@ -34,48 +35,28 @@ static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
>> * Return: number of KLVs that were successfully parsed and saved,
>> * negative error code on failure.
>> */
>> -static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
>> +static int pf_send_policy_klvs(struct xe_gt *gt, struct xe_guc_buf buf, u32 num_dwords)
>> {
>> - const u32 bytes = num_dwords * sizeof(u32);
>> - struct xe_tile *tile = gt_to_tile(gt);
>> - struct xe_device *xe = tile_to_xe(tile);
>> struct xe_guc *guc = &gt->uc.guc;
>> - struct xe_bo *bo;
>> - int ret;
>>
>> - bo = xe_bo_create_pin_map(xe, tile, NULL,
>> - ALIGN(bytes, PAGE_SIZE),
>> - ttm_bo_type_kernel,
>> - XE_BO_FLAG_VRAM_IF_DGFX(tile) |
>> - XE_BO_FLAG_GGTT);
>> - if (IS_ERR(bo))
>> - return PTR_ERR(bo);
>> -
>> - xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
>> -
>> - ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);
>> -
>> - xe_bo_unpin_map_no_vm(bo);
>> -
>> - return ret;
>> + return guc_action_update_vgt_policy(guc, xe_guc_buf_flush(buf), num_dwords);
>> }
>>
>> /*
>> * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
>> * negative error code on failure.
>> */
>> -static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
>> - const u32 *klvs, u32 num_dwords)
>> +static int pf_push_policy_buf_klvs(struct xe_gt *gt, u32 num_klvs,
>> + struct xe_guc_buf buf, u32 num_dwords)
>> {
>> int ret;
>>
>> - xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
>> -
>> - ret = pf_send_policy_klvs(gt, klvs, num_dwords);
>> + ret = pf_send_policy_klvs(gt, buf, num_dwords);
>>
>> if (ret != num_klvs) {
>> int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
>> struct drm_printer p = xe_gt_info_printer(gt);
>> + void *klvs = xe_guc_buf_cpu_ptr(buf);
>>
>> xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
>> num_klvs, str_plural(num_klvs), ERR_PTR(err));
>> @@ -86,6 +67,23 @@ static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
>> return 0;
>> }
>>
>> +/*
>> + * Return: 0 on success, -ENOBUFS if there is no free buffer for the indirect data,
>> + * negative error code on failure.
>> + */
>> +static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
>> + const u32 *klvs, u32 num_dwords)
>> +{
>> + CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
>> +
>> + xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
>> +
>> + if (!xe_guc_buf_is_valid(buf))
>> + return -ENOBUFS;
>> +
>> + return pf_push_policy_buf_klvs(gt, num_klvs, buf, num_dwords);
>> +}
>> +
>> static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
>> {
>> u32 klv[] = {
>> --
>> 2.47.1
>>
>>