[PATCH] drm/amdgpu: add drv_vram_usage_va for virt data exchange
Liu01, Tong (Esther)
Tong.Liu01 at amd.com
Mon Nov 21 08:14:03 UTC 2022
[AMD Official Use Only - General]
Hi,
Please review, thanks
Kind regards,
Esther
-----Original Message-----
From: Tong Liu01 <Tong.Liu01 at amd.com>
Sent: Friday, November 18, 2022 10:56 AM
To: amd-gfx at lists.freedesktop.org
Cc: Quan, Evan <Evan.Quan at amd.com>; Chen, Horace <Horace.Chen at amd.com>; Tuikov, Luben <Luben.Tuikov at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>; Deucher, Alexander <Alexander.Deucher at amd.com>; Xiao, Jack <Jack.Xiao at amd.com>; Zhang, Hawking <Hawking.Zhang at amd.com>; Liu, Monk <Monk.Liu at amd.com>; Xu, Feifei <Feifei.Xu at amd.com>; Wang, Yang(Kevin) <KevinYang.Wang at amd.com>; Liu01, Tong (Esther) <Tong.Liu01 at amd.com>; Zha, YiFan(Even) <Yifan.Zha at amd.com>; Liu01, Tong (Esther) <Tong.Liu01 at amd.com>
Subject: [PATCH] drm/amdgpu: add drv_vram_usage_va for virt data exchange
For vram_usagebyfirmware_v2_2, the firmware VRAM reservation is not used, so fw_vram_usage_va is NULL and virt data exchange can no longer go through it. Add drv_vram_usage_va and use it for virt data exchange in the vram_usagebyfirmware_v2_2 case. Also clean up a few code style issues introduced by the earlier VRAM reservation logic patch.
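To make the change concrete: wherever the virt data exchange path previously assumed fw_vram_usage_va, it now falls back to drv_vram_usage_va when the firmware region was not reserved. A minimal sketch of that selection logic (the helper name below is only illustrative, not part of the patch):

	/* Pick whichever reserved-VRAM mapping is available: v2_1 tables populate
	 * the fw region, v2_2 tables populate the driver region instead.
	 * Returns NULL if neither region was reserved.
	 */
	static void *amdgpu_virt_msg_va(struct amdgpu_device *adev)
	{
		if (adev->mman.fw_vram_usage_va)
			return adev->mman.fw_vram_usage_va;

		return adev->mman.drv_vram_usage_va;
	}

The actual diff open-codes this check in amdgpu_virt_add_bad_page() and amdgpu_virt_exchange_data().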
Signed-off-by: Tong Liu01 <Tong.Liu01 at amd.com>
---
.../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 16 ++---
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 +
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 59 +++++++++++++------
4 files changed, 54 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 9b97fa39d47a..e40df72c138a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -104,7 +104,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
- uint32_t start_addr, fw_size, drv_size;
+ u32 start_addr, fw_size, drv_size;
start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
@@ -116,7 +116,7 @@ static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
drv_size);
if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
- (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (start_addr &
@@ -133,7 +133,7 @@ static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
- uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
+ u32 fw_start_addr, fw_size, drv_start_addr, drv_size;
fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
@@ -147,14 +147,16 @@ static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
drv_start_addr,
drv_size);
- if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+ if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = fw_size << 10;
}
- if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+ if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0) {
/* driver request VRAM reservation for SR-IOV */
adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
@@ -172,8 +174,8 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
vram_usagebyfirmware);
struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
- uint16_t data_offset;
- uint8_t frev, crev;
+ u16 data_offset;
+ u8 frev, crev;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 52f2282411cb..dd8b6a11db9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1570,7 +1570,7 @@ static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
NULL,
- NULL);
+ &adev->mman.drv_vram_usage_va);
}
/**
@@ -1608,8 +1608,9 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
- uint64_t vram_size = adev->gmc.visible_vram_size;
+ u64 vram_size = adev->gmc.visible_vram_size;
+ adev->mman.drv_vram_usage_va = NULL;
adev->mman.drv_vram_usage_reserved_bo = NULL;
if (adev->mman.drv_vram_usage_size == 0 ||
@@ -1621,7 +1622,7 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
adev->mman.drv_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.drv_vram_usage_reserved_bo,
- NULL);
+ &adev->mman.drv_vram_usage_va);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b391c8d076ff..b4d8ba2789f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -90,6 +90,7 @@ struct amdgpu_mman {
u64 drv_vram_usage_start_offset;
u64 drv_vram_usage_size;
struct amdgpu_bo *drv_vram_usage_reserved_bo;
+ void *drv_vram_usage_va;
/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
struct amdgpu_bo *sdma_access_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index a226a6c48fb7..e80033e24d48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -428,11 +428,18 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
struct eeprom_table_record bp;
uint64_t retired_page;
uint32_t bp_idx, bp_cnt;
+ void *vram_usage_va = NULL;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+ vram_usage_va = adev->mman.fw_vram_usage_va;
+ } else {
+ vram_usage_va = adev->mman.drv_vram_usage_va;
+ }
if (bp_block_size) {
bp_cnt = bp_block_size / sizeof(uint64_t);
for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
- retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
+ retired_page = *(uint64_t *)(vram_usage_va +
bp_block_offset + bp_idx * sizeof(uint64_t));
bp.retired_page = retired_page;
@@ -643,7 +650,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
- if (adev->mman.fw_vram_usage_va != NULL) {
+ if (adev->mman.fw_vram_usage_va != NULL &&
+ adev->mman.drv_vram_usage_va != NULL) {
+ DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ } else if (adev->mman.fw_vram_usage_va != NULL ||
+ adev->mman.drv_vram_usage_va != NULL) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
@@ -666,32 +677,42 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
uint32_t bp_block_size = 0;
struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
- if (adev->mman.fw_vram_usage_va != NULL) {
-
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ if (adev->mman.fw_vram_usage_va != NULL ||
+ adev->mman.drv_vram_usage_va != NULL) {
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ } else if (adev->mman.drv_vram_usage_va != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ }
amdgpu_virt_read_pf2vf_data(adev);
amdgpu_virt_write_vf2pf_data(adev);
/* bad page handling for version 2 */
if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
- pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+ pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
- bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
- ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
- bp_block_size = pf2vf_v2->bp_block_size;
+ bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+ ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
+ bp_block_size = pf2vf_v2->bp_block_size;
- if (bp_block_size && !adev->virt.ras_init_done)
- amdgpu_virt_init_ras_err_handler_data(adev);
+ if (bp_block_size && !adev->virt.ras_init_done)
+ amdgpu_virt_init_ras_err_handler_data(adev);
- if (adev->virt.ras_init_done)
- amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
- }
+ if (adev->virt.ras_init_done)
+ amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
+ }
}
}
--
2.25.1