[PATCH 2/3] drm/amd/pp: Delete the wrap layer of smu_allocate/free_memory

Rex Zhu <Rex.Zhu at amd.com>
Mon Mar 5 10:43:07 UTC 2018


Use amdgpu_bo_create_kernel/amdgpu_bo_free_kernel directly instead of the
smu_allocate_memory/smu_free_memory wrappers around the CGS allocator, and
delete the now unused wrappers.
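
For reference, the allocation/free pattern the drivers switch to looks
roughly like this (illustrative sketch only; adev, size, bo, gpu_addr and
cpu_addr are placeholder names, not code from this series):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_addr;
	int r;

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &bo, &gpu_addr, &cpu_addr);
	if (r)
		return r;

	/* CPU access goes through cpu_addr, the SMU gets gpu_addr */

	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);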

Change-Id: I74f20353edd4e0df6328db66914cd9eabb60e1d7
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/smumgr.h         |   7 -
 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c   |  39 +++---
 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h   |  11 +-
 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c   |  53 +++----
 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h   |  11 +-
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c |  52 +++----
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h |  11 +-
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c      |  51 -------
 .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c   | 166 ++++++++++-----------
 .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h   |  11 +-
 10 files changed, 187 insertions(+), 225 deletions(-)
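
Note on error handling: vega10_smu_init() now unwinds partial allocations
with a goto ladder instead of the nested PP_ASSERT_WITH_CODE cleanup chains.
Schematically (hypothetical names, not code from this patch):

	r = amdgpu_bo_create_kernel(adev, size_a, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &bo_a, &addr_a, &ptr_a);
	if (r)
		return -EINVAL;

	r = amdgpu_bo_create_kernel(adev, size_b, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &bo_b, &addr_b, &ptr_b);
	if (r)
		goto err_free_a;

	return 0;

err_free_a:
	amdgpu_bo_free_kernel(&bo_a, &addr_a, &ptr_a);
	return -EINVAL;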

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index e1f6e83..8872c5c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -106,13 +106,6 @@ enum SMU_MAC_DEFINITION {
 extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 					uint16_t msg, uint32_t parameter);
 
-extern int smu_allocate_memory(void *device, uint32_t size,
-			 enum cgs_gpu_mem_type type,
-			 uint32_t byte_align, uint64_t *mc_addr,
-			 void **kptr, void *handle);
-
-extern int smu_free_memory(void *device, void *handle);
-
 extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
 
 extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 7fe4c11..0d2892d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -750,7 +750,6 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr)
 
 static int cz_smu_init(struct pp_hwmgr *hwmgr)
 {
-	uint64_t mc_addr = 0;
 	int ret = 0;
 	struct cz_smumgr *cz_smu;
 
@@ -768,31 +767,31 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
 		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
 
-	ret = smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				cz_smu->toc_buffer.data_size,
-				CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 				PAGE_SIZE,
-				&mc_addr,
-				&cz_smu->toc_buffer.kaddr,
-				&cz_smu->toc_buffer.handle);
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->toc_buffer.handle,
+				&cz_smu->toc_buffer.mc_addr,
+				&cz_smu->toc_buffer.kaddr);
 	if (ret != 0)
 		return -1;
 
-	cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+	cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(cz_smu->toc_buffer.mc_addr);
+	cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(cz_smu->toc_buffer.mc_addr);
 
-	ret = smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				cz_smu->smu_buffer.data_size,
-				CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 				PAGE_SIZE,
-				&mc_addr,
-				&cz_smu->smu_buffer.kaddr,
-				&cz_smu->smu_buffer.handle);
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->smu_buffer.handle,
+				&cz_smu->smu_buffer.mc_addr,
+				&cz_smu->smu_buffer.kaddr);
 	if (ret != 0)
 		return -1;
 
-	cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+	cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(cz_smu->smu_buffer.mc_addr);
+	cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(cz_smu->smu_buffer.mc_addr);
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
 		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
@@ -845,10 +844,12 @@ static int cz_smu_fini(struct pp_hwmgr *hwmgr)
 
 	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
 	if (cz_smu) {
-		cgs_free_gpu_mem(hwmgr->device,
-				cz_smu->toc_buffer.handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				cz_smu->smu_buffer.handle);
+		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+					&cz_smu->toc_buffer.mc_addr,
+					&cz_smu->toc_buffer.kaddr);
+		amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
+					&cz_smu->smu_buffer.mc_addr,
+					&cz_smu->smu_buffer.kaddr);
 		kfree(cz_smu);
 	}
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
index 756b2c4..0faaecb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
@@ -60,11 +60,16 @@ enum cz_scratch_entry {
 
 struct cz_buffer_entry {
 	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
+	union {
+		struct {
+			uint32_t mc_addr_low;
+			uint32_t mc_addr_high;
+		};
+		uint64_t mc_addr;
+	};
 	void *kaddr;
 	enum cz_scratch_entry firmware_ID;
-	unsigned long handle; /* as bo handle used when release bo */
+	struct amdgpu_bo *handle; /* as bo handle used when release bo */
 };
 
 struct cz_register_index_data_pair {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index 2d662b4..9379857 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -292,10 +292,12 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr)
 	if (priv) {
 		rv_smc_disable_sdma(hwmgr);
 		rv_smc_disable_vcn(hwmgr);
-		cgs_free_gpu_mem(hwmgr->device,
-				priv->smu_tables.entry[WMTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				priv->smu_tables.entry[CLOCKTABLE].handle);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].table_addr,
+					(void **)(&priv->smu_tables.entry[WMTABLE].table));
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
+					&priv->smu_tables.entry[CLOCKTABLE].table_addr,
+					(void **)(&priv->smu_tables.entry[CLOCKTABLE].table));
 		kfree(hwmgr->smu_backend);
 		hwmgr->smu_backend = NULL;
 	}
@@ -328,7 +330,8 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 	struct rv_smumgr *priv;
 	uint64_t mc_addr;
 	void *kaddr = NULL;
-	unsigned long handle;
+	struct amdgpu_bo *handle;
+	int r;
 
 	priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
 
@@ -338,19 +341,16 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = priv;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(Watermarks_t),
-			CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[rv_smu_init] Out of memory for wmtable.",
-			kfree(hwmgr->smu_backend);
-			hwmgr->smu_backend = NULL;
-			return -EINVAL);
+	if (r)
+		return -EINVAL;
 
 	priv->smu_tables.entry[WMTABLE].version = 0x01;
 	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
@@ -359,25 +359,25 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 			smu_upper_32_bits(mc_addr);
 	priv->smu_tables.entry[WMTABLE].table_addr_low =
 			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[WMTABLE].table_addr = mc_addr;
 	priv->smu_tables.entry[WMTABLE].table = kaddr;
 	priv->smu_tables.entry[WMTABLE].handle = handle;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(DpmClocks_t),
-			CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[rv_smu_init] Out of memory for CLOCKTABLE.",
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			kfree(hwmgr->smu_backend);
-			hwmgr->smu_backend = NULL;
-			return -EINVAL);
+			&kaddr);
+
+	if (r) {
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].table_addr,
+					(void **)(&priv->smu_tables.entry[WMTABLE].table));
+		return -EINVAL;
+	}
 
 	priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
 	priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
@@ -386,6 +386,7 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 			smu_upper_32_bits(mc_addr);
 	priv->smu_tables.entry[CLOCKTABLE].table_addr_low =
 			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[CLOCKTABLE].table_addr = mc_addr;
 	priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
 	priv->smu_tables.entry[CLOCKTABLE].handle = handle;
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index caebdbe..aa061e8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -37,10 +37,15 @@ struct smu_table_entry {
 	uint32_t version;
 	uint32_t size;
 	uint32_t table_id;
-	uint32_t table_addr_high;
-	uint32_t table_addr_low;
+	union {
+		struct {
+			uint32_t table_addr_low;
+			uint32_t table_addr_high;
+		};
+		uint64_t table_addr;
+	};
 	uint8_t *table;
-	unsigned long handle;
+	struct amdgpu_bo *handle;
 };
 
 struct smu_table_array {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 311ff37..4f5de2e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -587,7 +587,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 	struct smu7_smumgr *smu_data;
 	uint8_t *internal_buf;
 	uint64_t mc_addr = 0;
-
+	int r;
 	/* Allocate memory for backend private data */
 	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 	smu_data->header_buffer.data_size =
@@ -595,47 +595,44 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 
 /* Allocate FW image data structure and header buffer and
  * send the header buffer address to SMU */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 		smu_data->header_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 		PAGE_SIZE,
+		AMDGPU_GEM_DOMAIN_VRAM,
+		&smu_data->header_buffer.handle,
 		&mc_addr,
-		&smu_data->header_buffer.kaddr,
-		&smu_data->header_buffer.handle);
+		&smu_data->header_buffer.kaddr);
+
+	if (r)
+		return -EINVAL;
 
 	smu_data->header = smu_data->header_buffer.kaddr;
 	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
 	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
-		"Out of memory.",
-		kfree(hwmgr->smu_backend);
-		cgs_free_gpu_mem(hwmgr->device,
-		(cgs_handle_t)smu_data->header_buffer.handle);
-		return -EINVAL);
+	smu_data->header_buffer.mc_addr = mc_addr;
 
 	if (cgs_is_virtualization_enabled(hwmgr->device))
 		return 0;
 
 	smu_data->smu_buffer.data_size = 200*4096;
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 		smu_data->smu_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 		PAGE_SIZE,
+		AMDGPU_GEM_DOMAIN_VRAM,
+		&smu_data->smu_buffer.handle,
 		&mc_addr,
-		&smu_data->smu_buffer.kaddr,
-		&smu_data->smu_buffer.handle);
+		&smu_data->smu_buffer.kaddr);
 
+	if (r) {
+		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+					&smu_data->header_buffer.mc_addr,
+					&smu_data->header_buffer.kaddr);
+		return -EINVAL;
+	}
 	internal_buf = smu_data->smu_buffer.kaddr;
 	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
 	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != internal_buf),
-		"Out of memory.",
-		kfree(hwmgr->smu_backend);
-		cgs_free_gpu_mem(hwmgr->device,
-		(cgs_handle_t)smu_data->smu_buffer.handle);
-		return -EINVAL);
+	smu_data->smu_buffer.mc_addr = mc_addr;
 
 	if (smum_is_hw_avfs_present(hwmgr))
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
@@ -650,9 +647,14 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
-	smu_free_memory(hwmgr->device, (void *) smu_data->header_buffer.handle);
+	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+					&smu_data->header_buffer.mc_addr,
+					&smu_data->header_buffer.kaddr);
+
 	if (!cgs_is_virtualization_enabled(hwmgr->device))
-		smu_free_memory(hwmgr->device, (void *) smu_data->smu_buffer.handle);
+		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
+					&smu_data->smu_buffer.mc_addr,
+					&smu_data->smu_buffer.kaddr);
 
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index c87263b..af161f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -31,10 +31,15 @@
 
 struct smu7_buffer_entry {
 	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
+	union {
+		struct {
+			uint32_t mc_addr_low;
+			uint32_t mc_addr_high;
+		};
+		uint64_t mc_addr;
+	};
 	void *kaddr;
-	unsigned long  handle;
+	struct amdgpu_bo *handle;
 };
 
 struct smu7_avfs {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 43b1010..3645127 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -144,57 +144,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 						hwmgr, msg, parameter);
 }
 
-int smu_allocate_memory(void *device, uint32_t size,
-			 enum cgs_gpu_mem_type type,
-			 uint32_t byte_align, uint64_t *mc_addr,
-			 void **kptr, void *handle)
-{
-	int ret = 0;
-	cgs_handle_t cgs_handle;
-
-	if (device == NULL || handle == NULL ||
-	    mc_addr == NULL || kptr == NULL)
-		return -EINVAL;
-
-	ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
-				(cgs_handle_t *)handle);
-	if (ret)
-		return -ENOMEM;
-
-	cgs_handle = *(cgs_handle_t *)handle;
-
-	ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
-	if (ret)
-		goto error_gmap;
-
-	ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
-	if (ret)
-		goto error_kmap;
-
-	return 0;
-
-error_kmap:
-	cgs_gunmap_gpu_mem(device, cgs_handle);
-
-error_gmap:
-	cgs_free_gpu_mem(device, cgs_handle);
-	return ret;
-}
-
-int smu_free_memory(void *device, void *handle)
-{
-	cgs_handle_t cgs_handle = (cgs_handle_t)handle;
-
-	if (device == NULL || handle == NULL)
-		return -EINVAL;
-
-	cgs_kunmap_gpu_mem(device, cgs_handle);
-	cgs_gunmap_gpu_mem(device, cgs_handle);
-	cgs_free_gpu_mem(device, cgs_handle);
-
-	return 0;
-}
-
 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	if (NULL != hwmgr->smumgr_funcs->init_smc_table)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 68db582..214486c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -381,7 +381,8 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	struct vega10_smumgr *priv;
 	uint64_t mc_addr;
 	void *kaddr = NULL;
-	unsigned long handle, tools_size;
+	unsigned long tools_size;
+	struct amdgpu_bo *handle;
 	int ret;
 	struct cgs_firmware_info info = {0};
 
@@ -399,20 +400,16 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = priv;
 
 	/* allocate space for pptable */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(PPTable_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for pptable.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+			&kaddr);
+
+	if (ret)
+		return -EINVAL;
 
 	priv->smu_tables.entry[PPTABLE].version = 0x01;
 	priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
@@ -421,26 +418,21 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 			smu_upper_32_bits(mc_addr);
 	priv->smu_tables.entry[PPTABLE].table_addr_low =
 			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[PPTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[PPTABLE].table = kaddr;
 	priv->smu_tables.entry[PPTABLE].handle = handle;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(Watermarks_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for wmtable.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+			&kaddr);
+
+	if (ret)
+		goto err0;
 
 	priv->smu_tables.entry[WMTABLE].version = 0x01;
 	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
@@ -453,24 +445,16 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	priv->smu_tables.entry[WMTABLE].handle = handle;
 
 	/* allocate space for AVFS table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(AvfsTable_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for avfs table.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+			&kaddr);
+
+	if (ret)
+		goto err1;
 
 	priv->smu_tables.entry[AVFSTABLE].version = 0x01;
 	priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
@@ -484,50 +468,38 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 
 	tools_size = 0x19000;
 	if (tools_size) {
-		smu_allocate_memory(hwmgr->device,
+		ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				tools_size,
-				CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 				PAGE_SIZE,
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&handle,
 				&mc_addr,
-				&kaddr,
-				&handle);
-
-		if (kaddr) {
-			priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
-			priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
-			priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
-			priv->smu_tables.entry[TOOLSTABLE].table_addr_high =
-					smu_upper_32_bits(mc_addr);
-			priv->smu_tables.entry[TOOLSTABLE].table_addr_low =
-					smu_lower_32_bits(mc_addr);
-			priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
-			priv->smu_tables.entry[TOOLSTABLE].handle = handle;
-		}
+				&kaddr);
+		if (ret)
+			goto err2;
+
+		priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
+		priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
+		priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
+		priv->smu_tables.entry[TOOLSTABLE].table_addr_high =
+				smu_upper_32_bits(mc_addr);
+		priv->smu_tables.entry[TOOLSTABLE].table_addr_low =
+				smu_lower_32_bits(mc_addr);
+		priv->smu_tables.entry[TOOLSTABLE].mc_addr = mc_addr;
+		priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
+		priv->smu_tables.entry[TOOLSTABLE].handle = handle;
 	}
 
 	/* allocate space for AVFS Fuse table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(AvfsFuseOverride_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for avfs fuse table.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+			&kaddr);
+	if (ret)
+		goto err3;
 
 	priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
 	priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
@@ -540,6 +501,25 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
 
 	return 0;
+
+err3:
+	if (priv->smu_tables.entry[TOOLSTABLE].table)
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+				&priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+				(void **)(&priv->smu_tables.entry[TOOLSTABLE].table));
+err2:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+				&priv->smu_tables.entry[AVFSTABLE].mc_addr,
+				(void **)(&priv->smu_tables.entry[AVFSTABLE].table));
+err1:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+				&priv->smu_tables.entry[WMTABLE].mc_addr,
+				(void **)(&priv->smu_tables.entry[WMTABLE].table));
+err0:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+			&priv->smu_tables.entry[PPTABLE].mc_addr,
+			(void **)(&priv->smu_tables.entry[PPTABLE].table));
+	return -EINVAL;
 }
 
 static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
@@ -548,17 +528,22 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
 			(struct vega10_smumgr *)(hwmgr->smu_backend);
 
 	if (priv) {
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+				&priv->smu_tables.entry[PPTABLE].mc_addr,
+				(void **)(&priv->smu_tables.entry[PPTABLE].table));
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					(void **)(&priv->smu_tables.entry[WMTABLE].table));
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+					&priv->smu_tables.entry[AVFSTABLE].mc_addr,
+					(void **)(&priv->smu_tables.entry[AVFSTABLE].table));
 		if (priv->smu_tables.entry[TOOLSTABLE].table)
-			cgs_free_gpu_mem(hwmgr->device,
-					(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
+			amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+					&priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+					(void **)(&priv->smu_tables.entry[TOOLSTABLE].table));
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSFUSETABLE].handle,
+					&priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
+					(void **)(&priv->smu_tables.entry[AVFSFUSETABLE].table));
 		kfree(hwmgr->smu_backend);
 		hwmgr->smu_backend = NULL;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 0695455..ffb5e69 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -38,10 +38,15 @@ struct smu_table_entry {
 	uint32_t version;
 	uint32_t size;
 	uint32_t table_id;
-	uint32_t table_addr_high;
-	uint32_t table_addr_low;
+	union {
+		struct {
+			uint32_t table_addr_low;
+			uint32_t table_addr_high;
+		};
+		uint64_t mc_addr;
+	};
 	uint8_t *table;
-	unsigned long handle;
+	struct amdgpu_bo *handle;
 };
 
 struct smu_table_array {
-- 
1.9.1


