[RFC PATCH] drm/ttm, drm/vmwgfx: Have TTM support AMD SEV encryption
Thomas Hellstrom
thomas at shipmail.org
Fri May 24 09:11:54 UTC 2019
Hi, Christian,
On 5/24/19 10:37 AM, Koenig, Christian wrote:
> On 5/24/19 10:11 AM, Thomas Hellström (VMware) wrote:
>>
>> From: Thomas Hellstrom <thellstrom at vmware.com>
>>
>> With SEV encryption, all DMA memory must be marked decrypted
>> (AKA "shared") for devices to be able to read it. In the future we might
>> want to be able to switch normal (encrypted) memory to decrypted in exactly
>> the same way as we handle caching states, and that would require additional
>> memory pools. But for now, rely on memory allocated with
>> dma_alloc_coherent() which is already decrypted with SEV enabled. Set up
>> the page protection accordingly. Drivers must detect when SEV is enabled
>> and switch to the dma page pool.
>>
>> This patch has not yet been tested. As a follow-up, we might want to
>> cache decrypted pages in the dma page pool regardless of their caching
>> state.
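
A minimal sketch of what the "switch to the dma page pool" requirement above
could look like on the driver side (not part of the patch; struct my_ttm and
its dev pointer are hypothetical, and it assumes the driver embeds a struct
ttm_dma_tt):

#include <linux/kernel.h>
#include <linux/mem_encrypt.h>		/* sev_active(), x86 only */
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_page_alloc.h>	/* ttm_pool_populate(), ttm_dma_populate() */

struct my_ttm {				/* hypothetical driver tt type */
	struct ttm_dma_tt dma_tt;	/* struct ttm_tt is its first member */
	struct device *dev;		/* device backing dma_alloc_coherent() */
};

static int my_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct my_ttm *mtt = container_of(ttm, struct my_ttm, dma_tt.ttm);

	if (ttm->state != tt_unpopulated)
		return 0;

	/*
	 * With SEV active, pages handed to the device must be decrypted.
	 * dma_alloc_coherent() already returns decrypted memory, so route
	 * the allocation through the coherent (dma) page pool.
	 */
	if (sev_active())
		return ttm_dma_populate(&mtt->dma_tt, mtt->dev, ctx);

	return ttm_pool_populate(ttm, ctx);
}
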
> This patch is unnecessary; SEV support already works fine with at least
> amdgpu, and I would expect that it also works with other drivers as well.
>
> Also see this patch:
>
> commit 64e1f830ea5b3516a4256ed1c504a265d7f2a65c
> Author: Christian König <christian.koenig at amd.com>
> Date: Wed Mar 13 10:11:19 2019 +0100
>
> drm: fallback to dma_alloc_coherent when memory encryption is active
>
> We can't just map any random page we get when memory encryption is
> active.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Acked-by: Alex Deucher <alexander.deucher at amd.com>
> Link: https://patchwork.kernel.org/patch/10850833/
>
> Regards,
> Christian.
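
For context, the general pattern behind that commit's title, as a sketch (not
a reproduction of the commit; my_alloc_buffer() and the split between the two
paths are made up for illustration):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mem_encrypt.h>		/* mem_encrypt_active() */
#include <linux/mm.h>			/* page_address(), get_order() */

static void *my_alloc_buffer(struct device *dev, size_t size,
			     dma_addr_t *dma_addr)
{
	struct page *page;

	/*
	 * With SME/SEV active, pages from the normal allocator are encrypted
	 * with a key the device cannot use, so fall back to
	 * dma_alloc_coherent(), which returns memory that is already set up
	 * as decrypted ("shared").
	 */
	if (mem_encrypt_active())
		return dma_alloc_coherent(dev, size, dma_addr, GFP_KERNEL);

	page = alloc_pages(GFP_KERNEL, get_order(size));
	*dma_addr = 0;			/* caller maps this itself later */
	return page ? page_address(page) : NULL;
}
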
Yes, I noticed that. Although I fail to see where we automagically clear
the PTE encrypted bit when mapping coherent memory. For the linear kernel
map, that's done within dma_alloc_coherent(), but what about kernel vmaps
and user-space maps? Is that done automatically by the x86 platform
layer?
/Thomas
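
For reference, the mechanism in question, as a sketch: pgprot_decrypted()
clears the SME/SEV encryption mask from a pgprot, and on x86 PAGE_KERNEL
carries that mask when memory encryption is active, so a vmap() built
straight from PAGE_KERNEL stays encrypted unless the caller strips the bit;
the patch does that centrally in ttm_io_prot() for ttms flagged
TTM_PAGE_FLAG_DECRYPTED. (map_decrypted() below is only an illustration.)

#include <linux/mm.h>		/* pgprot_decrypted() via the pgtable headers */
#include <linux/vmalloc.h>

/* Kernel vmap of pages a SEV device must be able to read. */
static void *map_decrypted(struct page **pages, unsigned int num_pages)
{
	pgprot_t prot = pgprot_decrypted(PAGE_KERNEL);

	return vmap(pages, num_pages, 0, prot);
}
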
>
>> Cc: Christian König <christian.koenig at amd.com>
>> Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
>> ---
>> drivers/gpu/drm/ttm/ttm_bo_util.c | 17 +++++++++++++----
>> drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 ++++--
>> drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3 +++
>> drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 6 ++++--
>> include/drm/ttm/ttm_bo_driver.h | 8 +++++---
>> include/drm/ttm/ttm_tt.h | 1 +
>> 6 files changed, 30 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> index 895d77d799e4..1d6643bd0b01 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> @@ -419,11 +419,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
>> page = i * dir + add;
>> if (old_iomap == NULL) {
>> pgprot_t prot = ttm_io_prot(old_mem->placement,
>> + ttm->page_flags,
>> PAGE_KERNEL);
>> ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
>> prot);
>> } else if (new_iomap == NULL) {
>> pgprot_t prot = ttm_io_prot(new_mem->placement,
>> + ttm->page_flags,
>> PAGE_KERNEL);
>> ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
>> prot);
>> @@ -526,11 +528,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>> return 0;
>> }
>>
>> -pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
>> +pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp)
>> {
>> /* Cached mappings need no adjustment */
>> if (caching_flags & TTM_PL_FLAG_CACHED)
>> - return tmp;
>> + goto check_encryption;
>>
>> #if defined(__i386__) || defined(__x86_64__)
>> if (caching_flags & TTM_PL_FLAG_WC)
>> @@ -548,6 +550,11 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
>> #if defined(__sparc__) || defined(__mips__)
>> tmp = pgprot_noncached(tmp);
>> #endif
>> +
>> +check_encryption:
>> + if (tt_page_flags & TTM_PAGE_FLAG_DECRYPTED)
>> + tmp = pgprot_decrypted(tmp);
>> +
>> return tmp;
>> }
>> EXPORT_SYMBOL(ttm_io_prot);
>> @@ -594,7 +601,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
>> if (ret)
>> return ret;
>>
>> - if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
>> + if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED) &&
>> + !(ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)) {
>> /*
>> * We're mapping a single page, and the desired
>> * page protection is consistent with the bo.
>> @@ -608,7 +616,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
>> * We need to use vmap to get the desired page protection
>> * or to make the buffer object look contiguous.
>> */
>> - prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
>> + prot = ttm_io_prot(mem->placement, ttm->page_flags,
>> + PAGE_KERNEL);
>> map->bo_kmap_type = ttm_bo_map_vmap;
>> map->virtual = vmap(ttm->pages + start_page, num_pages,
>> 0, prot);
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> index 2d9862fcf6fd..e12247edd243 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> @@ -245,7 +245,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>> goto out_io_unlock;
>> }
>>
>> - cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
>> if (!bo->mem.bus.is_iomem) {
>> struct ttm_operation_ctx ctx = {
>> .interruptible = false,
>> @@ -255,13 +254,16 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>> };
>>
>> ttm = bo->ttm;
>> + cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
>> + ttm->page_flags, prot);
>> if (ttm_tt_populate(bo->ttm, &ctx)) {
>> ret = VM_FAULT_OOM;
>> goto out_io_unlock;
>> }
>> } else {
>> /* Iomem should not be marked encrypted */
>> - cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
>> + cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
>> + TTM_PAGE_FLAG_DECRYPTED, prot);
>> }
>>
>> /*
>> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
>> index 98d100fd1599..1a8a09c05805 100644
>> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
>> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
>> @@ -979,6 +979,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
>> }
>>
>> ttm->state = tt_unbound;
>> + if (sev_active())
>> + ttm->page_flags |= TTM_PAGE_FLAG_DECRYPTED;
>> +
>> return 0;
>> }
>> EXPORT_SYMBOL_GPL(ttm_dma_populate);
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> index fc6673cde289..11c8cd248530 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> @@ -483,8 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
>> d.src_pages = src->ttm->pages;
>> d.dst_num_pages = dst->num_pages;
>> d.src_num_pages = src->num_pages;
>> - d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
>> - d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
>> + d.dst_prot = ttm_io_prot(dst->mem.placement, dst->ttm->page_flags,
>> + PAGE_KERNEL);
>> + d.src_prot = ttm_io_prot(src->mem.placement, src->ttm->page_flags,
>> + PAGE_KERNEL);
>> d.diff = diff;
>>
>> for (j = 0; j < h; ++j) {
>> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
>> index 53fe95be5b32..261cc89c024e 100644
>> --- a/include/drm/ttm/ttm_bo_driver.h
>> +++ b/include/drm/ttm/ttm_bo_driver.h
>> @@ -889,13 +889,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
>> /**
>> * ttm_io_prot
>> *
>> - * @c_state: Caching state.
>> + * @caching_flags: The caching flags of the map.
>> + * @tt_page_flags: The tt_page_flags of the map, TTM_PAGE_FLAG_*
>> * @tmp: Page protection flag for a normal, cached mapping.
>> *
>> * Utility function that returns the pgprot_t that should be used for
>> - * setting up a PTE with the caching model indicated by @c_state.
>> + * setting up a PTE with the caching model indicated by @caching_flags,
>> + * and encryption state indicated by @tt_page_flags.
>> */
>> -pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
>> +pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp);
>>
>> extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
>>
>> diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
>> index c0e928abf592..45cc26355513 100644
>> --- a/include/drm/ttm/ttm_tt.h
>> +++ b/include/drm/ttm/ttm_tt.h
>> @@ -41,6 +41,7 @@ struct ttm_operation_ctx;
>> #define TTM_PAGE_FLAG_DMA32 (1 << 7)
>> #define TTM_PAGE_FLAG_SG (1 << 8)
>> #define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
>> +#define TTM_PAGE_FLAG_DECRYPTED (1 << 10)
>>
>> enum ttm_caching_state {
>> tt_uncached,
>> --
>> 2.20.1
>>
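
Caller-side view of the updated interface, condensed from the hunks above
(sketch, assuming this RFC were applied; my_kmap_bo() is made up):

#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>	/* ttm_io_prot() */

static void *my_kmap_bo(struct ttm_buffer_object *bo)
{
	/* The ttm's page_flags now travel with the caching flags, so a
	 * decrypted (dma-pool) ttm automatically gets pgprot_decrypted(). */
	pgprot_t prot = ttm_io_prot(bo->mem.placement, bo->ttm->page_flags,
				    PAGE_KERNEL);

	return vmap(bo->ttm->pages, bo->num_pages, 0, prot);
}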