[PATCH 6/6] drm/amdgpu/sienna_cichlid: add SMU i2c support
Deucher, Alexander
Alexander.Deucher at amd.com
Tue Jul 21 17:02:55 UTC 2020
I tried that at first, but the SMU i2c interface structures are different per ASIC.
Alex
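(For context, a rough sketch of why the helper does not share cleanly: each ASIC's SMU firmware interface header, e.g. smu11_driver_if_arcturus.h versus smu11_driver_if_sienna_cichlid.h, declares its own SwI2cCmd_t/SwI2cRequest_t with the same type names but different layouts. The field lists below are approximate and renamed so the sketch compiles; treat them as an assumption, not a quote of the headers.)

    #include <stdint.h>

    /*
     * Illustrative only: in the real headers both structs are called
     * SwI2cCmd_t, which is exactly why one compiled helper cannot serve
     * both ASICs without an extra abstraction layer.
     */
    typedef struct {
        uint8_t RegisterAddr;   /* register address, write path only */
        uint8_t Cmd;            /* read (0) or write (1) */
        uint8_t Data;           /* data to send, or data returned on read */
        uint8_t CmdConfig;      /* STOP/RESTART flags */
    } arcturus_SwI2cCmd_t;      /* approximate arcturus layout */

    typedef struct {
        uint8_t ReadWriteData;  /* data to send, or data returned on read */
        uint8_t CmdConfig;      /* read/write bit plus STOP/RESTART flags */
    } sienna_cichlid_SwI2cCmd_t;  /* approximate sienna_cichlid layout */

A common helper would therefore need per-ASIC fill callbacks or a translation step, which is roughly what duplicating these ~240 lines avoids.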
________________________________
From: Grodzovsky, Andrey <Andrey.Grodzovsky at amd.com>
Sent: Tuesday, July 21, 2020 1:01 PM
To: Alex Deucher <alexdeucher at gmail.com>; amd-gfx at lists.freedesktop.org <amd-gfx at lists.freedesktop.org>
Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
Subject: Re: [PATCH 6/6] drm/amdgpu/sienna_cichlid: add SMU i2c support
Looks like the same code as arcturus - should we make it a common helper and
reuse it in both?
Andrey
On 7/21/20 12:52 PM, Alex Deucher wrote:
> Enable SMU i2c bus access for sienna_cichlid asics.
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> .../drm/amd/powerplay/sienna_cichlid_ppt.c | 239 ++++++++++++++++++
> 1 file changed, 239 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
> index 5faef41b63a3..e1857fbb0a6f 100644
> --- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
> @@ -23,6 +23,7 @@
>
> #include <linux/firmware.h>
> #include <linux/pci.h>
> +#include <linux/i2c.h>
> #include "amdgpu.h"
> #include "amdgpu_smu.h"
> #include "smu_internal.h"
> @@ -52,6 +53,8 @@
> #undef pr_info
> #undef pr_debug
>
> +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
> +
> #define FEATURE_MASK(feature) (1ULL << feature)
> #define SMC_DPM_FEATURE ( \
> FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
> @@ -455,6 +458,8 @@ static int sienna_cichlid_tables_init(struct smu_context *smu, struct smu_table
> PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
> SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
> PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
> + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
> + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
> SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
> PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
> SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
> @@ -2487,6 +2492,238 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
> dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
> }
>
> +static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
> + uint8_t address, uint32_t numbytes,
> + uint8_t *data)
> +{
> + int i;
> +
> + BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
> +
> + req->I2CcontrollerPort = 0;
> + req->I2CSpeed = 2;
> + req->SlaveAddress = address;
> + req->NumCmds = numbytes;
> +
> + for (i = 0; i < numbytes; i++) {
> + SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
> +
> + /* First 2 bytes are always write for lower 2b EEPROM address */
> + if (i < 2)
> + cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
> + else
> + cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
> +
> +
> + /* Add RESTART for read after address filled */
> + cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
> +
> + /* Add STOP in the end */
> + cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
> +
> + /* Fill with data regardless if read or write to simplify code */
> + cmd->ReadWriteData = data[i];
> + }
> +}
> +
> +static int sienna_cichlid_i2c_read_data(struct i2c_adapter *control,
> + uint8_t address,
> + uint8_t *data,
> + uint32_t numbytes)
> +{
> + uint32_t i, ret = 0;
> + SwI2cRequest_t req;
> + struct amdgpu_device *adev = to_amdgpu_device(control);
> + struct smu_table_context *smu_table = &adev->smu.smu_table;
> + struct smu_table *table = &smu_table->driver_table;
> +
> + memset(&req, 0, sizeof(req));
> + sienna_cichlid_fill_i2c_req(&req, false, address, numbytes, data);
> +
> + mutex_lock(&adev->smu.mutex);
> + /* Now read data starting with that address */
> + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
> + true);
> + mutex_unlock(&adev->smu.mutex);
> +
> + if (!ret) {
> + SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
> +
> + /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
> + for (i = 0; i < numbytes; i++)
> + data[i] = res->SwI2cCmds[i].ReadWriteData;
> +
> + dev_dbg(adev->dev, "sienna_cichlid_i2c_read_data, address = %x, bytes = %d, data :",
> + (uint16_t)address, numbytes);
> +
> + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
> + 8, 1, data, numbytes, false);
> + } else
> + dev_err(adev->dev, "sienna_cichlid_i2c_read_data - error occurred :%x", ret);
> +
> + return ret;
> +}
> +
> +static int sienna_cichlid_i2c_write_data(struct i2c_adapter *control,
> + uint8_t address,
> + uint8_t *data,
> + uint32_t numbytes)
> +{
> + uint32_t ret;
> + SwI2cRequest_t req;
> + struct amdgpu_device *adev = to_amdgpu_device(control);
> +
> + memset(&req, 0, sizeof(req));
> + sienna_cichlid_fill_i2c_req(&req, true, address, numbytes, data);
> +
> + mutex_lock(&adev->smu.mutex);
> + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
> + mutex_unlock(&adev->smu.mutex);
> +
> + if (!ret) {
> + dev_dbg(adev->dev, "sienna_cichlid_i2c_write(), address = %x, bytes = %d , data: ",
> + (uint16_t)address, numbytes);
> +
> + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
> + 8, 1, data, numbytes, false);
> + /*
> + * According to EEPROM spec there is a MAX of 10 ms required for
> + * EEPROM to flush internal RX buffer after STOP was issued at the
> + * end of write transaction. During this time the EEPROM will not be
> + * responsive to any more commands - so wait a bit more.
> + */
> + msleep(10);
> +
> + } else
> + dev_err(adev->dev, "sienna_cichlid_i2c_write- error occurred :%x", ret);
> +
> + return ret;
> +}
> +
> +static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
> + struct i2c_msg *msgs, int num)
> +{
> + uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
> + uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
> +
> + for (i = 0; i < num; i++) {
> + /*
> + * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
> + * once and hence the data needs to be spliced into chunks and sent each
> + * chunk separately
> + */
> + data_size = msgs[i].len - 2;
> + data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
> + next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
> + data_ptr = msgs[i].buf + 2;
> +
> + for (j = 0; j < data_size / data_chunk_size; j++) {
> + /* Insert the EEPROM dest address, bits 0-15 */
> + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
> + data_chunk[1] = (next_eeprom_addr & 0xff);
> +
> + if (msgs[i].flags & I2C_M_RD) {
> + ret = sienna_cichlid_i2c_read_data(i2c_adap,
> + (uint8_t)msgs[i].addr,
> + data_chunk, MAX_SW_I2C_COMMANDS);
> +
> + memcpy(data_ptr, data_chunk + 2, data_chunk_size);
> + } else {
> +
> + memcpy(data_chunk + 2, data_ptr, data_chunk_size);
> +
> + ret = sienna_cichlid_i2c_write_data(i2c_adap,
> + (uint8_t)msgs[i].addr,
> + data_chunk, MAX_SW_I2C_COMMANDS);
> + }
> +
> + if (ret) {
> + num = -EIO;
> + goto fail;
> + }
> +
> + next_eeprom_addr += data_chunk_size;
> + data_ptr += data_chunk_size;
> + }
> +
> + if (data_size % data_chunk_size) {
> + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
> + data_chunk[1] = (next_eeprom_addr & 0xff);
> +
> + if (msgs[i].flags & I2C_M_RD) {
> + ret = sienna_cichlid_i2c_read_data(i2c_adap,
> + (uint8_t)msgs[i].addr,
> + data_chunk, (data_size % data_chunk_size) + 2);
> +
> + memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
> + } else {
> + memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
> +
> + ret = sienna_cichlid_i2c_write_data(i2c_adap,
> + (uint8_t)msgs[i].addr,
> + data_chunk, (data_size % data_chunk_size) + 2);
> + }
> +
> + if (ret) {
> + num = -EIO;
> + goto fail;
> + }
> + }
> + }
> +
> +fail:
> + return num;
> +}
> +
> +static u32 sienna_cichlid_i2c_func(struct i2c_adapter *adap)
> +{
> + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
> +}
> +
> +
> +static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
> + .master_xfer = sienna_cichlid_i2c_xfer,
> + .functionality = sienna_cichlid_i2c_func,
> +};
> +
> +static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
> +{
> + struct amdgpu_device *adev = to_amdgpu_device(control);
> +
> + return control->dev.parent == &adev->pdev->dev;
> +}
> +
> +static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
> +{
> + struct amdgpu_device *adev = to_amdgpu_device(control);
> + int res;
> +
> + /* smu_i2c_eeprom_init may be called twice in sriov */
> + if (sienna_cichlid_i2c_adapter_is_added(control))
> + return 0;
> +
> + control->owner = THIS_MODULE;
> + control->class = I2C_CLASS_SPD;
> + control->dev.parent = &adev->pdev->dev;
> + control->algo = &sienna_cichlid_i2c_algo;
> + snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
> +
> + res = i2c_add_adapter(control);
> + if (res)
> + DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
> +
> + return res;
> +}
> +
> +static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
> +{
> + if (!sienna_cichlid_i2c_adapter_is_added(control))
> + return;
> +
> + i2c_del_adapter(control);
> +}
> +
> +
> static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
> .tables_init = sienna_cichlid_tables_init,
> .alloc_dpm_context = sienna_cichlid_allocate_dpm_context,
> @@ -2500,6 +2737,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
> .set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
> .dpm_set_vcn_enable = sienna_cichlid_dpm_set_vcn_enable,
> .dpm_set_jpeg_enable = sienna_cichlid_dpm_set_jpeg_enable,
> + .i2c_eeprom_init = sienna_cichlid_i2c_control_init,
> + .i2c_eeprom_fini = sienna_cichlid_i2c_control_fini,
> .print_clk_levels = sienna_cichlid_print_clk_levels,
> .force_clk_levels = sienna_cichlid_force_clk_levels,
> .populate_umd_state_clk = sienna_cichlid_populate_umd_state_clk,
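For reference, a minimal caller-side sketch of what the msgs[i].len - 2 / MAX_SW_I2C_COMMANDS - 2 arithmetic in sienna_cichlid_i2c_xfer() assumes: the first two bytes of every i2c_msg buffer carry the 16-bit EEPROM word address and the payload follows, with the xfer hook splitting the payload into MAX_SW_I2C_COMMANDS - 2 byte chunks internally. The helper name and the 64-byte buffer below are hypothetical, not part of this patch.

    #include <linux/i2c.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical example: write `len` payload bytes to 16-bit EEPROM word
     * address `eeprom_addr` through the SMU-backed adapter registered by
     * sienna_cichlid_i2c_control_init().
     */
    static int example_eeprom_write(struct i2c_adapter *adap, u16 slave_addr,
                                    u16 eeprom_addr, const u8 *payload, u16 len)
    {
        u8 buf[2 + 64];                 /* 2 address bytes + payload */
        struct i2c_msg msg = {
            .addr  = slave_addr,
            .flags = 0,                 /* write */
            .len   = 2 + len,           /* xfer() treats len - 2 as data_size */
            .buf   = buf,
        };

        if (len > sizeof(buf) - 2)
            return -EINVAL;

        buf[0] = (eeprom_addr >> 8) & 0xff;  /* matches msgs[i].buf[0] above */
        buf[1] = eeprom_addr & 0xff;         /* matches msgs[i].buf[1] */
        memcpy(&buf[2], payload, len);

        /* i2c_transfer() returns the number of messages processed or a
         * negative error code. */
        return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
    }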