[PATCH 2/2] drm/amd/powerplay: allocate one piece of VRAM for all tables transferring
Quan, Evan
Evan.Quan at amd.com
Tue Dec 31 02:58:20 UTC 2019
Hi Alex,
Please check the new patch sets.
https://lists.freedesktop.org/archives/amd-gfx/2019-December/044352.html
https://lists.freedesktop.org/archives/amd-gfx/2019-December/044353.html
Regards,
Evan
> -----Original Message-----
> From: Alex Deucher <alexdeucher at gmail.com>
> Sent: Monday, December 30, 2019 11:22 PM
> To: Quan, Evan <Evan.Quan at amd.com>
> Cc: amd-gfx list <amd-gfx at lists.freedesktop.org>
> Subject: Re: [PATCH 2/2] drm/amd/powerplay: allocate one piece of VRAM for
> all tables transferring
>
> On Mon, Dec 30, 2019 at 5:50 AM Evan Quan <evan.quan at amd.com> wrote:
> >
> > Simplify the table transferring between driver and SMU and use less
> > VRAM.
> >
> > Change-Id: I3f9b54fd9b8c0bcaeb533fc1a07bb06050fefbd8
> > Signed-off-by: Evan Quan <evan.quan at amd.com>
> > ---
> > drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 101 ++++++++++--------
> > drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 2 +-
> > .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 3 +-
> > drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 4 +
> > drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 4 +
> > drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 4 +-
> > drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 10 +-
> > drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 4 +
> > 8 files changed, 73 insertions(+), 59 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index c3cb1b8f43b5..bd3dbd1a0ad3 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -490,7 +490,7 @@ int smu_update_table(struct smu_context *smu,
> enum
> > smu_table_id table_index, int {
> > struct smu_table_context *smu_table = &smu->smu_table;
> > struct amdgpu_device *adev = smu->adev;
> > - struct smu_table *table = smu_table->driver_table;
> > + struct smu_table *table = &smu_table->driver_table;
> > int table_id = smu_table_get_index(smu, table_index);
> > uint32_t table_size;
> > int ret = 0;
> > @@ -941,24 +941,26 @@ static int smu_init_fb_allocations(struct
> smu_context *smu)
> > struct amdgpu_device *adev = smu->adev;
> > struct smu_table_context *smu_table = &smu->smu_table;
> > struct smu_table *tables = smu_table->tables;
> > - struct smu_table **driver_table = &(smu_table->driver_table);
> > + struct smu_table *driver_table = &(smu_table->driver_table);
> > uint32_t max_table_size = 0;
> > - int ret, i, index = 0;
> > + int ret, i;
> >
> > - for (i = 0; i < SMU_TABLE_COUNT; i++) {
> > - if (tables[i].size == 0)
> > - continue;
> > + /* VRAM allocation for tool table */
> > + if (tables[SMU_TABLE_PMSTATUSLOG].size) {
> > ret = amdgpu_bo_create_kernel(adev,
> > - tables[i].size,
> > - tables[i].align,
> > - tables[i].domain,
> > - &tables[i].bo,
> > - &tables[i].mc_address,
> > - &tables[i].cpu_addr);
> > - if (ret)
> > - goto failed;
> > + tables[SMU_TABLE_PMSTATUSLOG].size,
> > + tables[SMU_TABLE_PMSTATUSLOG].align,
> > + tables[SMU_TABLE_PMSTATUSLOG].domain,
> > + &tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> > + if (ret) {
> > + pr_err("VRAM allocation for tool table failed!\n");
> > + return ret;
> > + }
> > }
> >
> > + /* VRAM allocation for driver table */
> > for (i = 0; i < SMU_TABLE_COUNT; i++) {
> > if (tables[i].size == 0)
> > continue;
> > @@ -966,24 +968,29 @@ static int smu_init_fb_allocations(struct
> smu_context *smu)
> > if (i == SMU_TABLE_PMSTATUSLOG)
> > continue;
> >
> > - if (max_table_size < tables[i].size) {
> > + if (max_table_size < tables[i].size)
> > max_table_size = tables[i].size;
> > - index = i;
> > - }
> > }
> >
> > - *driver_table = &tables[index];
> > -
> > - return 0;
> > -failed:
> > - while (--i >= 0) {
> > - if (tables[i].size == 0)
> > - continue;
> > - amdgpu_bo_free_kernel(&tables[i].bo,
> > - &tables[i].mc_address,
> > - &tables[i].cpu_addr);
> > -
> > + driver_table->size = max_table_size;
> > + driver_table->align = PAGE_SIZE;
> > + driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
> > +
> > + ret = amdgpu_bo_create_kernel(adev,
> > + driver_table->size,
> > + driver_table->align,
> > + driver_table->domain,
> > + &driver_table->bo,
> > + &driver_table->mc_address,
> > + &driver_table->cpu_addr);
> > + if (ret) {
> > + pr_err("VRAM allocation for driver table failed!\n");
> > + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> > +                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> > }
> > +
> > return ret;
>
> Shouldn't this change be merged into the previous patch? Otherwise, we'll
> break I think.
>
> > }
> >
> > @@ -991,18 +998,19 @@ static int smu_fini_fb_allocations(struct
> > smu_context *smu) {
> > struct smu_table_context *smu_table = &smu->smu_table;
> > struct smu_table *tables = smu_table->tables;
> > - uint32_t i = 0;
> > + struct smu_table *driver_table = &(smu_table->driver_table);
> >
> > if (!tables)
> > return 0;
> >
> > - for (i = 0; i < SMU_TABLE_COUNT; i++) {
> > - if (tables[i].size == 0)
> > - continue;
> > - amdgpu_bo_free_kernel(&tables[i].bo,
> > - &tables[i].mc_address,
> > - &tables[i].cpu_addr);
> > - }
> > + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> > + amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> > +
> > + amdgpu_bo_free_kernel(&driver_table->bo,
> > + &driver_table->mc_address,
> > + &driver_table->cpu_addr);
> >
> > return 0;
> > }
> > @@ -1913,26 +1921,25 @@ int smu_set_df_cstate(struct smu_context *smu,
> >
> > int smu_write_watermarks_table(struct smu_context *smu) {
> > - int ret = 0;
> > - struct smu_table_context *smu_table = &smu->smu_table;
> > - struct smu_table *table = NULL;
> > + void *watermarks_table = smu->smu_table.watermarks_table;
> >
> > - table = &smu_table->tables[SMU_TABLE_WATERMARKS];
> > -
> > - if (!table->cpu_addr)
> > + if (!watermarks_table)
> > return -EINVAL;
> >
> > -        ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
> > + return smu_update_table(smu,
> > + SMU_TABLE_WATERMARKS,
> > + 0,
> > + watermarks_table,
> > true);
> > -
> > - return ret;
> > }
> >
> > int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
> > struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
> > {
> > -        struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
> > - void *table = watermarks->cpu_addr;
> > + void *table = smu->smu_table.watermarks_table;
> > +
> > + if (!table)
> > + return -EINVAL;
> >
> > mutex_lock(&smu->mutex);
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > index e89965e5fdcb..064b5201a8a7 100644
> > --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > @@ -2022,7 +2022,7 @@ static int arcturus_i2c_eeprom_read_data(struct
> i2c_adapter *control,
> > SwI2cRequest_t req;
> > struct amdgpu_device *adev = to_amdgpu_device(control);
> > struct smu_table_context *smu_table = &adev->smu.smu_table;
> > -        struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
> > + struct smu_table *table = &smu_table->driver_table;
> >
> > memset(&req, 0, sizeof(req));
> > arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > index ed193adc881c..121bf81eced5 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > @@ -254,12 +254,13 @@ struct smu_table_context
> > unsigned long metrics_time;
> > void *metrics_table;
> > void *clocks_table;
> > + void *watermarks_table;
>
> Can you split out the watermarks change as a separate patch or at
> least explain why you are changing how it's handled?
>
> Alex
>
> >
> > void *max_sustainable_clocks;
> > struct smu_bios_boot_up_values boot_values;
> > void *driver_pptable;
> > struct smu_table *tables;
> > - struct smu_table *driver_table;
> > + struct smu_table driver_table;
> > struct smu_table memory_pool;
> > uint8_t thermal_controller_type;
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > index ed147dd51325..aa206bde619b 100644
> > --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > @@ -555,6 +555,10 @@ static int navi10_tables_init(struct smu_context
> *smu, struct smu_table *tables)
> > return -ENOMEM;
> > smu_table->metrics_time = 0;
> >
> > + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t),
> GFP_KERNEL);
> > + if (!smu_table->watermarks_table)
> > + return -ENOMEM;
> > +
> > return 0;
> > }
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > index c4b5984c86d9..861e6410363b 100644
> > --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > @@ -209,6 +209,10 @@ static int renoir_tables_init(struct smu_context
> *smu, struct smu_table *tables)
> > return -ENOMEM;
> > smu_table->metrics_time = 0;
> >
> > + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t),
> GFP_KERNEL);
> > + if (!smu_table->watermarks_table)
> > + return -ENOMEM;
> > +
> > return 0;
> > }
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > index 79a63edcd7ba..962e97860fe8 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > @@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context
> *smu)
> >
> > kfree(smu_table->tables);
> > kfree(smu_table->metrics_table);
> > + kfree(smu_table->watermarks_table);
> > smu_table->tables = NULL;
> > smu_table->metrics_table = NULL;
> > + smu_table->watermarks_table = NULL;
> > smu_table->metrics_time = 0;
> >
> > ret = smu_v11_0_fini_dpm_context(smu);
> > @@ -776,7 +778,7 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct
> smu_context *smu)
> >
> > int smu_v11_0_set_driver_table_location(struct smu_context *smu)
> > {
> > - struct smu_table *driver_table = smu->smu_table.driver_table;
> > + struct smu_table *driver_table = &smu->smu_table.driver_table;
> > int ret = 0;
> >
> > if (driver_table->mc_address) {
> > diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > index cd2be9fb2513..2b1ef9eb0db6 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > @@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context
> *smu)
> > int smu_v12_0_populate_smc_tables(struct smu_context *smu)
> > {
> > struct smu_table_context *smu_table = &smu->smu_table;
> > - struct smu_table *table = NULL;
> > -
> > - table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
> > - if (!table)
> > - return -EINVAL;
> > -
> > - if (!table->cpu_addr)
> > - return -EINVAL;
> >
> >         return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
> > }
> > @@ -517,7 +509,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct
> smu_context *smu, enum smu_clk_
> >
> > int smu_v12_0_set_driver_table_location(struct smu_context *smu)
> > {
> > - struct smu_table *driver_table = smu->smu_table.driver_table;
> > + struct smu_table *driver_table = &smu->smu_table.driver_table;
> > int ret = 0;
> >
> > if (driver_table->mc_address) {
> > diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > index 12f97956058b..38febd5ca4da 100644
> > --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > @@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context
> *smu, struct smu_table *tables)
> > return -ENOMEM;
> > smu_table->metrics_time = 0;
> >
> > + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t),
> GFP_KERNEL);
> > + if (!smu_table->watermarks_table)
> > + return -ENOMEM;
> > +
> > return 0;
> > }
> >
> > --
> > 2.24.0
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx at lists.freedesktop.org
> >
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.free
> desktop.org%2Fmailman%2Flistinfo%2Famd-
> gfx&data=02%7C01%7Cevan.quan%40amd.com%7Cdd3a4f9d66ba4e7085
> ba08d78d3c0c38%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637
> 133161355995859&sdata=KYYcYqB%2Bd3vWoIHVYVSed8bXtqrfg1dWrlFT0
> QMFhRs%3D&reserved=0
More information about the amd-gfx
mailing list