[PATCH 3/5] drm/amd/display: Add sysfs interface for set/get srm
Bhawanpreet Lakha
Bhawanpreet.lakha at amd.com
Fri Jan 17 19:29:43 UTC 2020
On 2020-01-17 2:23 p.m., Alex Deucher wrote:
> On Thu, Jan 16, 2020 at 3:30 PM Bhawanpreet Lakha
> <Bhawanpreet.Lakha at amd.com> wrote:
>> [Why]
>> We need to set/get the SRM, and the Linux kernel is not supposed to
>> write to storage itself, so we need to provide an interface.
>>
>> [How]
>> Provide a sysfs interface so usermode can set/get the SRM.
>>
>> Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha at amd.com>
>> Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
>> ---
>> .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 124 +++++++++++++++++-
>> .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 6 +
>> 2 files changed, 128 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
>> index a269916f7dd6..a191c84ad8eb 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
>> @@ -28,6 +28,8 @@
>> #include "amdgpu_dm.h"
>> #include "dm_helpers.h"
>> #include <drm/drm_hdcp.h>
>> +#include "hdcp_psp.h"
>>
>> static bool
>> lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
>> @@ -67,6 +69,16 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
>> 	return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
>> }
>>
>> +/*
>> + * Stubs for the PSP SRM interface: until it is hooked up, "get" reports
>> + * no SRM and "set" succeeds as a no-op.
>> + */
>> +static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
>> +{
>> +	return NULL;
>> +}
>> +
>> +static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
>> +{
>> +	return 0;
>> +}
>> +
>> static void process_output(struct hdcp_workqueue *hdcp_work)
>> {
>> 	struct mod_hdcp_output output = hdcp_work->output;
>> @@ -88,6 +100,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
>> 	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
>> }
>>
>> +static void link_lock(struct hdcp_workqueue *work, bool lock)
>> +{
>> +	int i = 0;
>> +
>> +	for (i = 0; i < work->max_link; i++) {
>> +		if (lock)
>> +			mutex_lock(&work[i].mutex);
>> +		else
>> +			mutex_unlock(&work[i].mutex);
>> +	}
>> +}
>> +
>> void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
>> 			 unsigned int link_index,
>> 			 struct amdgpu_dm_connector *aconnector,
>> @@ -302,7 +326,8 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
>> 	}
>>
>> +	kfree(hdcp_work->srm);
>> +	kfree(hdcp_work->srm_temp);
>> 	kfree(hdcp_work);
>> -
>> }
>>
>> static void update_config(void *handle, struct cp_psp_stream_config *config)
>> @@ -338,6 +363,84 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
>> 	hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
>> }
>>
>> +
>> +/*
>> + * srm_data_write() can be called twice, because the SRM can be bigger
>> + * than PAGE_SIZE and sysfs delivers the data in PAGE_SIZE chunks.
>> + *
>> + * We try to set the SRM on each call. If the SRM spans two chunks, PSP
>> + * will reject the first (incomplete) call and accept the second one.
>> + *
>> + * Because of this we do not return an error for a failed set, as that
>> + * would stop the next call. So it is a good idea to read the sysfs file
>> + * back to verify that the SRM was actually set.
>> + */
> Rather than using a file to push the data directly in chunks, how about
> adding a sysfs file where you specify the path to the SRM file? The
> driver could then use that path to call request_firmware() and get the
> entire binary in one shot.
>
> Alex
>
>
I thought about using request_firmware, but since we also need to save
the data and there is no "save_firmware" counterpart, we would end up
with different interfaces for reading and writing. To keep the two
consistent I used sysfs for both, so usermode just echoes and cats the
raw data, instead of echoing a file path and then catting the raw data.
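
Roughly, the write side looks like this from usermode (just an
illustrative sketch, not part of the patch; the hdcp_srm path comes
from the patch below, while the file names and the 4K buffer are made
up for the example). sysfs hands bin-attribute writes to the driver in
at most PAGE_SIZE pieces, which is why the SRM can arrive in two
chunks:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int src, dst;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <srm file>\n", argv[0]);
		return 1;
	}

	src = open(argv[1], O_RDONLY);
	dst = open("/sys/class/drm/card0/device/hdcp_srm", O_WRONLY);
	if (src < 0 || dst < 0)
		return 1;

	/* Each write() lands in srm_data_write() as one chunk; the
	 * driver re-tries the PSP "set" as the chunks accumulate. */
	while ((n = read(src, buf, sizeof(buf))) > 0)
		if (write(dst, buf, n) != n)
			return 1;

	close(dst);
	close(src);
	return 0;
}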
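
And because a failed intermediate set is swallowed (see the comment in
the patch), reading the node back is how usermode confirms the SRM
actually reached the PSP. Again, only a sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n, total = 0;
	int fd = open("/sys/class/drm/card0/device/hdcp_srm", O_RDONLY);

	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		total += n;
	close(fd);

	/* An error or 0 bytes here means the PSP never accepted the SRM. */
	printf("read %zd bytes of SRM back\n", total);
	return n < 0;
}

Bhawan
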
>> +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
>> +			      loff_t pos, size_t count)
>> +{
>> +	struct hdcp_workqueue *work;
>> +	uint32_t srm_version = 0;
>> +
>> +	work = container_of(bin_attr, struct hdcp_workqueue, attr);
>> +	link_lock(work, true);
>> +
>> +	memcpy(work->srm_temp + pos, buffer, count);
>> +
>> +	if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
>> +		DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
>> +		memcpy(work->srm, work->srm_temp, pos + count);
>> +		work->srm_size = pos + count;
>> +		work->srm_version = srm_version;
>> +	}
>> +
>> +	link_lock(work, false);
>> +
>> +	return count;
>> +}
>> +
>> +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
>> +			     loff_t pos, size_t count)
>> +{
>> +	struct hdcp_workqueue *work;
>> +	uint8_t *srm = NULL;
>> +	uint32_t srm_version;
>> +	uint32_t srm_size;
>> +	ssize_t ret = count;
>> +
>> +	work = container_of(bin_attr, struct hdcp_workqueue, attr);
>> +
>> +	link_lock(work, true);
>> +
>> +	srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
>> +
>> +	if (!srm) {
>> +		/* Don't leave the links locked on the error path */
>> +		ret = -EINVAL;
>> +		goto ret;
>> +	}
>> +
>> +	if (pos >= srm_size) {
>> +		/* EOF; skip the copies below or we would read past the SRM */
>> +		ret = 0;
>> +		goto ret;
>> +	}
>> +
>> +	if (srm_size - pos < count) {
>> +		memcpy(buffer, srm + pos, srm_size - pos);
>> +		ret = srm_size - pos;
>> +		goto ret;
>> +	}
>> +
>> +	memcpy(buffer, srm + pos, count);
>> +
>> +ret:
>> +	link_lock(work, false);
>> +	return ret;
>> +}
>> +
>> +static const struct bin_attribute data_attr = {
>> +	.attr = {.name = "hdcp_srm", .mode = 0664},
>> +	.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
>> +	.write = srm_data_write,
>> +	.read = srm_data_read,
>> +};
>> +
>> struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
>> {
>>
>> @@ -348,10 +451,19 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
>> 	if (hdcp_work == NULL)
>> 		goto fail_alloc_context;
>>
>> +	hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
>> +	if (hdcp_work->srm == NULL)
>> +		goto fail_alloc_context;
>> +
>> +	hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
>> +	if (hdcp_work->srm_temp == NULL)
>> +		goto fail_alloc_context;
>> +
>> +
>> 	hdcp_work->max_link = max_caps;
>>
>> 	for (i = 0; i < max_caps; i++) {
>> -
>> 		mutex_init(&hdcp_work[i].mutex);
>>
>> 		INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
>> @@ -371,10 +483,18 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
>> 	cp_psp->funcs.update_stream_config = update_config;
>> 	cp_psp->handle = hdcp_work;
>>
>> +	/* File created at /sys/class/drm/card0/device/hdcp_srm */
>> +	hdcp_work[0].attr = data_attr;
>> +
>> +	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
>> +		DRM_WARN("Failed to create device file hdcp_srm");
>> +
>> 	return hdcp_work;
>>
>> fail_alloc_context:
>> +	/* Free the members before the container, and only if it was allocated */
>> +	if (hdcp_work) {
>> +		kfree(hdcp_work->srm);
>> +		kfree(hdcp_work->srm_temp);
>> +	}
>> 	kfree(hdcp_work);
>>
>> 	return NULL;
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
>> index 331b50825510..5159b3a5e5b0 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
>> @@ -53,6 +53,12 @@ struct hdcp_workqueue {
>>
>> 	enum mod_hdcp_encryption_status encryption_status;
>> 	uint8_t max_link;
>> +
>> +	uint8_t *srm;
>> +	uint8_t *srm_temp;
>> +	uint32_t srm_version;
>> +	uint32_t srm_size;
>> +	struct bin_attribute attr;
>> };
>>
>> void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
>> --
>> 2.17.1
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx at lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx