[Mesa-dev] [PATCH 10/15] anv: modify the internal concept of format to express multiple planes
Jason Ekstrand
jason at jlekstrand.net
Mon Sep 18 19:05:14 UTC 2017
On Fri, Sep 15, 2017 at 7:11 AM, Lionel Landwerlin <lionel.g.landwerlin at intel.com> wrote:
> A given Vulkan format can now be decomposed into a set of planes. We
> now use 'struct anv_format_plane' to represent the format of those
> planes.
>
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> ---
> src/intel/vulkan/anv_blorp.c | 31 +--
> src/intel/vulkan/anv_formats.c | 513 +++++++++++++++++++++------------------
> src/intel/vulkan/anv_image.c | 16 +-
> src/intel/vulkan/anv_private.h | 54 ++++-
> src/intel/vulkan/genX_pipeline.c | 14 +-
> 5 files changed, 355 insertions(+), 273 deletions(-)
>
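(For context while reading the diff: the new data model, taken verbatim from
the anv_private.h hunk below, is a fixed array of per-plane formats plus a
plane count:

   struct anv_format_plane {
      enum isl_format isl_format:16;
      struct isl_swizzle swizzle;
   };

   struct anv_format {
      struct anv_format_plane planes[3];
      uint8_t n_planes;
   };

The helpers that used to return a struct anv_format now either return the
whole table entry or one anv_format_plane selected by aspect.)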
> diff --git a/src/intel/vulkan/anv_blorp.c b/src/intel/vulkan/anv_blorp.c
> index 915643ffa3c..c1f6eb69ca8 100644
> --- a/src/intel/vulkan/anv_blorp.c
> +++ b/src/intel/vulkan/anv_blorp.c
> @@ -323,8 +323,9 @@ copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
> }
>
> const enum isl_format buffer_format =
> - anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
> - aspect, VK_IMAGE_TILING_LINEAR);
> + anv_get_isl_plane_format(&cmd_buffer->device->info,
> + anv_image->vk_format,
> + aspect, VK_IMAGE_TILING_LINEAR);
>
> const VkExtent3D bufferImageExtent = {
> .width = pRegions[r].bufferRowLength ?
> @@ -459,12 +460,12 @@ void anv_CmdBlitImage(
> get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
> dst_image->aux_usage, &dst);
>
> - struct anv_format src_format =
> - anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
> - src_res->aspectMask, src_image->tiling);
> - struct anv_format dst_format =
> - anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
> - dst_res->aspectMask, dst_image->tiling);
> + struct anv_format_plane src_format =
> + anv_get_plane_format(&cmd_buffer->device->info, src_image->vk_format,
> + src_res->aspectMask, src_image->tiling);
> + struct anv_format_plane dst_format =
> + anv_get_plane_format(&cmd_buffer->device->info, dst_image->vk_format,
> + dst_res->aspectMask, dst_image->tiling);
>
> unsigned dst_start, dst_end;
> if (dst_image->type == VK_IMAGE_TYPE_3D) {
> @@ -760,9 +761,9 @@ void anv_CmdClearColorImage(
>
> assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
>
> - struct anv_format src_format =
> - anv_get_format(&cmd_buffer->device->info, image->vk_format,
> - VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);
> + struct anv_format_plane src_format =
> + anv_get_plane_format(&cmd_buffer->device->info, image->vk_format,
> + VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);
>
> unsigned base_layer = pRanges[r].baseArrayLayer;
> unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);
> @@ -976,10 +977,10 @@ clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
>
> enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
> if (clear_depth) {
> - depth_format = anv_get_isl_format(&cmd_buffer->device->info,
> - pass_att->format,
> - VK_IMAGE_ASPECT_DEPTH_BIT,
> - VK_IMAGE_TILING_OPTIMAL);
> + depth_format = anv_get_isl_plane_format(&cmd_buffer->device->info,
> + pass_att->format,
> + VK_IMAGE_ASPECT_DEPTH_BIT,
> + VK_IMAGE_TILING_OPTIMAL);
> }
>
> uint32_t binding_table;
> diff --git a/src/intel/vulkan/anv_formats.c b/src/intel/vulkan/anv_formats.c
> index 47acc56fd04..b311e15ce3f 100644
> --- a/src/intel/vulkan/anv_formats.c
> +++ b/src/intel/vulkan/anv_formats.c
> @@ -44,249 +44,287 @@
> #define BGRA _ISL_SWIZZLE(BLUE, GREEN, RED, ALPHA)
> #define RGB1 _ISL_SWIZZLE(RED, GREEN, BLUE, ONE)
>
> -#define swiz_fmt(__vk_fmt, __hw_fmt, __swizzle) \
> - [__vk_fmt] = { \
> - .isl_format = __hw_fmt, \
> - .swizzle = __swizzle, \
> +#define _fmt(__hw_fmt, __swizzle) \
> + { .isl_format = __hw_fmt, \
> + .swizzle = __swizzle }
> +
> +#define swiz_fmt1(__vk_fmt, __hw_fmt, __swizzle) \
> + [vk_enum_offset(__vk_fmt)] = { \
>
I think it would be nice to split this into two patches: one that switches
us to a table of tables and another that adds planes to anv_format.
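i.e. the first patch would keep struct anv_format single-plane and only
change the table indexing. Rough sketch (untested), combining the old macro
body with the new vk_enum_offset() indexing:

   #define fmt(__vk_fmt, __hw_fmt) \
      [vk_enum_offset(__vk_fmt)] = { \
         .isl_format = __hw_fmt, \
         .swizzle = RGBA, \
      }

The second patch would then wrap that in the planes[]/n_planes structure.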
> + .planes = { \
> + { .isl_format = __hw_fmt, .swizzle = __swizzle }, \
> + }, \
> + .n_planes = 1, \
> }
>
> -#define fmt(__vk_fmt, __hw_fmt) \
> - swiz_fmt(__vk_fmt, __hw_fmt, RGBA)
> +#define fmt1(__vk_fmt, __hw_fmt) \
> + swiz_fmt1(__vk_fmt, __hw_fmt, RGBA)
> +
> +#define ds_fmt(__vk_fmt, __depth_fmt, __stencil_fmt) \
> + [vk_enum_offset(__vk_fmt)] = { \
> + .planes = { \
> + { .isl_format = __depth_fmt, \
> + .swizzle = RGBA, \
> + }, \
> + { .isl_format = __stencil_fmt, \
> + .swizzle = RGBA, \
> + }, \
> + }, \
> + .n_planes = 2, \
>
It's a bit weird to me that this is only used for combined depth+stencil
formats. I would expect something called ds_fmt to be used for all
depth/stencil formats. However, doing that magic would be rather
annoying. Maybe just call it fmt2?
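Something like this, following the swiz_fmt1 style above (untested sketch):

   #define fmt2(__vk_fmt, __fmt1, __fmt2) \
      [vk_enum_offset(__vk_fmt)] = { \
         .planes = { \
            { .isl_format = __fmt1, .swizzle = RGBA, }, \
            { .isl_format = __fmt2, .swizzle = RGBA, }, \
         }, \
         .n_planes = 2, \
      }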
> + }
>
> /* HINT: For array formats, the ISL name should match the VK name. For
> * packed formats, they should have the channels in reverse order from each
> * other. The reason for this is that, for packed formats, the ISL (and
> * bspec) names are in LSB -> MSB order while VK formats are MSB -> LSB.
> */
> -static const struct anv_format anv_formats[] = {
> - fmt(VK_FORMAT_UNDEFINED, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_R4G4_UNORM_PACK8, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_R4G4B4A4_UNORM_PACK16, ISL_FORMAT_A4B4G4R4_UNORM),
> - swiz_fmt(VK_FORMAT_B4G4R4A4_UNORM_PACK16, ISL_FORMAT_A4B4G4R4_UNORM, BGRA),
> - fmt(VK_FORMAT_R5G6B5_UNORM_PACK16, ISL_FORMAT_B5G6R5_UNORM),
> - swiz_fmt(VK_FORMAT_B5G6R5_UNORM_PACK16, ISL_FORMAT_B5G6R5_UNORM, BGRA),
> - fmt(VK_FORMAT_R5G5B5A1_UNORM_PACK16, ISL_FORMAT_A1B5G5R5_UNORM),
> - fmt(VK_FORMAT_B5G5R5A1_UNORM_PACK16, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_A1R5G5B5_UNORM_PACK16, ISL_FORMAT_B5G5R5A1_UNORM),
> - fmt(VK_FORMAT_R8_UNORM, ISL_FORMAT_R8_UNORM),
> - fmt(VK_FORMAT_R8_SNORM, ISL_FORMAT_R8_SNORM),
> - fmt(VK_FORMAT_R8_USCALED, ISL_FORMAT_R8_USCALED),
> - fmt(VK_FORMAT_R8_SSCALED, ISL_FORMAT_R8_SSCALED),
> - fmt(VK_FORMAT_R8_UINT, ISL_FORMAT_R8_UINT),
> - fmt(VK_FORMAT_R8_SINT, ISL_FORMAT_R8_SINT),
> - fmt(VK_FORMAT_R8_SRGB, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_R8G8_UNORM, ISL_FORMAT_R8G8_UNORM),
> - fmt(VK_FORMAT_R8G8_SNORM, ISL_FORMAT_R8G8_SNORM),
> - fmt(VK_FORMAT_R8G8_USCALED, ISL_FORMAT_R8G8_USCALED),
> - fmt(VK_FORMAT_R8G8_SSCALED, ISL_FORMAT_R8G8_SSCALED),
> - fmt(VK_FORMAT_R8G8_UINT, ISL_FORMAT_R8G8_UINT),
> - fmt(VK_FORMAT_R8G8_SINT, ISL_FORMAT_R8G8_SINT),
> - fmt(VK_FORMAT_R8G8_SRGB, ISL_FORMAT_UNSUPPORTED), /* L8A8_UNORM_SRGB */
> - fmt(VK_FORMAT_R8G8B8_UNORM, ISL_FORMAT_R8G8B8_UNORM),
> - fmt(VK_FORMAT_R8G8B8_SNORM, ISL_FORMAT_R8G8B8_SNORM),
> - fmt(VK_FORMAT_R8G8B8_USCALED, ISL_FORMAT_R8G8B8_USCALED),
> - fmt(VK_FORMAT_R8G8B8_SSCALED, ISL_FORMAT_R8G8B8_SSCALED),
> - fmt(VK_FORMAT_R8G8B8_UINT, ISL_FORMAT_R8G8B8_UINT),
> - fmt(VK_FORMAT_R8G8B8_SINT, ISL_FORMAT_R8G8B8_SINT),
> - fmt(VK_FORMAT_R8G8B8_SRGB, ISL_FORMAT_R8G8B8_UNORM_SRGB),
> - fmt(VK_FORMAT_R8G8B8A8_UNORM, ISL_FORMAT_R8G8B8A8_UNORM),
> - fmt(VK_FORMAT_R8G8B8A8_SNORM, ISL_FORMAT_R8G8B8A8_SNORM),
> - fmt(VK_FORMAT_R8G8B8A8_USCALED, ISL_FORMAT_R8G8B8A8_USCALED),
> - fmt(VK_FORMAT_R8G8B8A8_SSCALED, ISL_FORMAT_R8G8B8A8_SSCALED),
> - fmt(VK_FORMAT_R8G8B8A8_UINT, ISL_FORMAT_R8G8B8A8_UINT),
> - fmt(VK_FORMAT_R8G8B8A8_SINT, ISL_FORMAT_R8G8B8A8_SINT),
> - fmt(VK_FORMAT_R8G8B8A8_SRGB, ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
> - fmt(VK_FORMAT_A8B8G8R8_UNORM_PACK32, ISL_FORMAT_R8G8B8A8_UNORM),
> - fmt(VK_FORMAT_A8B8G8R8_SNORM_PACK32, ISL_FORMAT_R8G8B8A8_SNORM),
> - fmt(VK_FORMAT_A8B8G8R8_USCALED_PACK32, ISL_FORMAT_R8G8B8A8_USCALED),
> - fmt(VK_FORMAT_A8B8G8R8_SSCALED_PACK32, ISL_FORMAT_R8G8B8A8_SSCALED),
> - fmt(VK_FORMAT_A8B8G8R8_UINT_PACK32, ISL_FORMAT_R8G8B8A8_UINT),
> - fmt(VK_FORMAT_A8B8G8R8_SINT_PACK32, ISL_FORMAT_R8G8B8A8_SINT),
> - fmt(VK_FORMAT_A8B8G8R8_SRGB_PACK32, ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
> - fmt(VK_FORMAT_A2R10G10B10_UNORM_PACK32, ISL_FORMAT_B10G10R10A2_UNORM),
> - fmt(VK_FORMAT_A2R10G10B10_SNORM_PACK32, ISL_FORMAT_B10G10R10A2_SNORM),
> - fmt(VK_FORMAT_A2R10G10B10_USCALED_PACK32, ISL_FORMAT_B10G10R10A2_USCALED),
> - fmt(VK_FORMAT_A2R10G10B10_SSCALED_PACK32, ISL_FORMAT_B10G10R10A2_SSCALED),
> - fmt(VK_FORMAT_A2R10G10B10_UINT_PACK32, ISL_FORMAT_B10G10R10A2_UINT),
> - fmt(VK_FORMAT_A2R10G10B10_SINT_PACK32, ISL_FORMAT_B10G10R10A2_SINT),
> - fmt(VK_FORMAT_A2B10G10R10_UNORM_PACK32, ISL_FORMAT_R10G10B10A2_UNORM),
> - fmt(VK_FORMAT_A2B10G10R10_SNORM_PACK32, ISL_FORMAT_R10G10B10A2_SNORM),
> - fmt(VK_FORMAT_A2B10G10R10_USCALED_PACK32, ISL_FORMAT_R10G10B10A2_USCALED),
> - fmt(VK_FORMAT_A2B10G10R10_SSCALED_PACK32, ISL_FORMAT_R10G10B10A2_SSCALED),
> - fmt(VK_FORMAT_A2B10G10R10_UINT_PACK32, ISL_FORMAT_R10G10B10A2_UINT),
> - fmt(VK_FORMAT_A2B10G10R10_SINT_PACK32, ISL_FORMAT_R10G10B10A2_SINT),
> - fmt(VK_FORMAT_R16_UNORM, ISL_FORMAT_R16_UNORM),
> - fmt(VK_FORMAT_R16_SNORM, ISL_FORMAT_R16_SNORM),
> - fmt(VK_FORMAT_R16_USCALED, ISL_FORMAT_R16_USCALED),
> - fmt(VK_FORMAT_R16_SSCALED, ISL_FORMAT_R16_SSCALED),
> - fmt(VK_FORMAT_R16_UINT, ISL_FORMAT_R16_UINT),
> - fmt(VK_FORMAT_R16_SINT, ISL_FORMAT_R16_SINT),
> - fmt(VK_FORMAT_R16_SFLOAT, ISL_FORMAT_R16_FLOAT),
> - fmt(VK_FORMAT_R16G16_UNORM, ISL_FORMAT_R16G16_UNORM),
> - fmt(VK_FORMAT_R16G16_SNORM, ISL_FORMAT_R16G16_SNORM),
> - fmt(VK_FORMAT_R16G16_USCALED, ISL_FORMAT_R16G16_USCALED),
> - fmt(VK_FORMAT_R16G16_SSCALED, ISL_FORMAT_R16G16_SSCALED),
> - fmt(VK_FORMAT_R16G16_UINT, ISL_FORMAT_R16G16_UINT),
> - fmt(VK_FORMAT_R16G16_SINT, ISL_FORMAT_R16G16_SINT),
> - fmt(VK_FORMAT_R16G16_SFLOAT, ISL_FORMAT_R16G16_FLOAT),
> - fmt(VK_FORMAT_R16G16B16_UNORM, ISL_FORMAT_R16G16B16_UNORM),
> - fmt(VK_FORMAT_R16G16B16_SNORM, ISL_FORMAT_R16G16B16_SNORM),
> - fmt(VK_FORMAT_R16G16B16_USCALED, ISL_FORMAT_R16G16B16_USCALED),
> - fmt(VK_FORMAT_R16G16B16_SSCALED, ISL_FORMAT_R16G16B16_SSCALED),
> - fmt(VK_FORMAT_R16G16B16_UINT, ISL_FORMAT_R16G16B16_UINT),
> - fmt(VK_FORMAT_R16G16B16_SINT, ISL_FORMAT_R16G16B16_SINT),
> - fmt(VK_FORMAT_R16G16B16_SFLOAT, ISL_FORMAT_R16G16B16_FLOAT),
> - fmt(VK_FORMAT_R16G16B16A16_UNORM, ISL_FORMAT_R16G16B16A16_UNORM),
> - fmt(VK_FORMAT_R16G16B16A16_SNORM, ISL_FORMAT_R16G16B16A16_SNORM),
> - fmt(VK_FORMAT_R16G16B16A16_USCALED, ISL_FORMAT_R16G16B16A16_USCALED),
> - fmt(VK_FORMAT_R16G16B16A16_SSCALED, ISL_FORMAT_R16G16B16A16_SSCALED),
> - fmt(VK_FORMAT_R16G16B16A16_UINT, ISL_FORMAT_R16G16B16A16_UINT),
> - fmt(VK_FORMAT_R16G16B16A16_SINT, ISL_FORMAT_R16G16B16A16_SINT),
> - fmt(VK_FORMAT_R16G16B16A16_SFLOAT, ISL_FORMAT_R16G16B16A16_FLOAT),
> - fmt(VK_FORMAT_R32_UINT, ISL_FORMAT_R32_UINT),
> - fmt(VK_FORMAT_R32_SINT, ISL_FORMAT_R32_SINT),
> - fmt(VK_FORMAT_R32_SFLOAT, ISL_FORMAT_R32_FLOAT),
> - fmt(VK_FORMAT_R32G32_UINT, ISL_FORMAT_R32G32_UINT),
> - fmt(VK_FORMAT_R32G32_SINT, ISL_FORMAT_R32G32_SINT),
> - fmt(VK_FORMAT_R32G32_SFLOAT, ISL_FORMAT_R32G32_FLOAT),
> - fmt(VK_FORMAT_R32G32B32_UINT, ISL_FORMAT_R32G32B32_UINT),
> - fmt(VK_FORMAT_R32G32B32_SINT, ISL_FORMAT_R32G32B32_SINT),
> - fmt(VK_FORMAT_R32G32B32_SFLOAT, ISL_FORMAT_R32G32B32_FLOAT),
> - fmt(VK_FORMAT_R32G32B32A32_UINT, ISL_FORMAT_R32G32B32A32_UINT),
> - fmt(VK_FORMAT_R32G32B32A32_SINT, ISL_FORMAT_R32G32B32A32_SINT),
> - fmt(VK_FORMAT_R32G32B32A32_SFLOAT, ISL_FORMAT_R32G32B32A32_FLOAT),
> - fmt(VK_FORMAT_R64_UINT, ISL_FORMAT_R64_PASSTHRU),
> - fmt(VK_FORMAT_R64_SINT, ISL_FORMAT_R64_PASSTHRU),
> - fmt(VK_FORMAT_R64_SFLOAT, ISL_FORMAT_R64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64_UINT, ISL_FORMAT_R64G64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64_SINT, ISL_FORMAT_R64G64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64_SFLOAT, ISL_FORMAT_R64G64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64_UINT, ISL_FORMAT_R64G64B64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64_SINT, ISL_FORMAT_R64G64B64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64_SFLOAT, ISL_FORMAT_R64G64B64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64A64_UINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64A64_SINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> - fmt(VK_FORMAT_R64G64B64A64_SFLOAT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> - fmt(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT),
> - fmt(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, ISL_FORMAT_R9G9B9E5_SHAREDEXP),
> -
> - fmt(VK_FORMAT_D16_UNORM, ISL_FORMAT_R16_UNORM),
> - fmt(VK_FORMAT_X8_D24_UNORM_PACK32, ISL_FORMAT_R24_UNORM_X8_TYPELESS),
> - fmt(VK_FORMAT_D32_SFLOAT, ISL_FORMAT_R32_FLOAT),
> - fmt(VK_FORMAT_S8_UINT, ISL_FORMAT_R8_UINT),
> - fmt(VK_FORMAT_D16_UNORM_S8_UINT, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_D24_UNORM_S8_UINT, ISL_FORMAT_R24_UNORM_X8_TYPELESS),
> - fmt(VK_FORMAT_D32_SFLOAT_S8_UINT, ISL_FORMAT_R32_FLOAT),
> -
> - swiz_fmt(VK_FORMAT_BC1_RGB_UNORM_BLOCK, ISL_FORMAT_BC1_UNORM, RGB1),
> - swiz_fmt(VK_FORMAT_BC1_RGB_SRGB_BLOCK, ISL_FORMAT_BC1_UNORM_SRGB, RGB1),
> - fmt(VK_FORMAT_BC1_RGBA_UNORM_BLOCK, ISL_FORMAT_BC1_UNORM),
> - fmt(VK_FORMAT_BC1_RGBA_SRGB_BLOCK, ISL_FORMAT_BC1_UNORM_SRGB),
> - fmt(VK_FORMAT_BC2_UNORM_BLOCK, ISL_FORMAT_BC2_UNORM),
> - fmt(VK_FORMAT_BC2_SRGB_BLOCK, ISL_FORMAT_BC2_UNORM_SRGB),
> - fmt(VK_FORMAT_BC3_UNORM_BLOCK, ISL_FORMAT_BC3_UNORM),
> - fmt(VK_FORMAT_BC3_SRGB_BLOCK, ISL_FORMAT_BC3_UNORM_SRGB),
> - fmt(VK_FORMAT_BC4_UNORM_BLOCK, ISL_FORMAT_BC4_UNORM),
> - fmt(VK_FORMAT_BC4_SNORM_BLOCK, ISL_FORMAT_BC4_SNORM),
> - fmt(VK_FORMAT_BC5_UNORM_BLOCK, ISL_FORMAT_BC5_UNORM),
> - fmt(VK_FORMAT_BC5_SNORM_BLOCK, ISL_FORMAT_BC5_SNORM),
> - fmt(VK_FORMAT_BC6H_UFLOAT_BLOCK, ISL_FORMAT_BC6H_UF16),
> - fmt(VK_FORMAT_BC6H_SFLOAT_BLOCK, ISL_FORMAT_BC6H_SF16),
> - fmt(VK_FORMAT_BC7_UNORM_BLOCK, ISL_FORMAT_BC7_UNORM),
> - fmt(VK_FORMAT_BC7_SRGB_BLOCK, ISL_FORMAT_BC7_UNORM_SRGB),
> - fmt(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8),
> - fmt(VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, ISL_FORMAT_ETC2_SRGB8),
> - fmt(VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8_PTA),
> - fmt(VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, ISL_FORMAT_ETC2_SRGB8_PTA),
> - fmt(VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, ISL_FORMAT_ETC2_EAC_RGBA8),
> - fmt(VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, ISL_FORMAT_ETC2_EAC_SRGB8_A8),
> - fmt(VK_FORMAT_EAC_R11_UNORM_BLOCK, ISL_FORMAT_EAC_R11),
> - fmt(VK_FORMAT_EAC_R11_SNORM_BLOCK, ISL_FORMAT_EAC_SIGNED_R11),
> - fmt(VK_FORMAT_EAC_R11G11_UNORM_BLOCK, ISL_FORMAT_EAC_RG11),
> - fmt(VK_FORMAT_EAC_R11G11_SNORM_BLOCK, ISL_FORMAT_EAC_SIGNED_RG11),
> - fmt(VK_FORMAT_ASTC_4x4_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_4X4_U8SRGB),
> - fmt(VK_FORMAT_ASTC_5x4_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X4_U8SRGB),
> - fmt(VK_FORMAT_ASTC_5x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X5_U8SRGB),
> - fmt(VK_FORMAT_ASTC_6x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X5_U8SRGB),
> - fmt(VK_FORMAT_ASTC_6x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X6_U8SRGB),
> - fmt(VK_FORMAT_ASTC_8x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X5_U8SRGB),
> - fmt(VK_FORMAT_ASTC_8x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X6_U8SRGB),
> - fmt(VK_FORMAT_ASTC_8x8_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X8_U8SRGB),
> - fmt(VK_FORMAT_ASTC_10x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X5_U8SRGB),
> - fmt(VK_FORMAT_ASTC_10x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X6_U8SRGB),
> - fmt(VK_FORMAT_ASTC_10x8_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X8_U8SRGB),
> - fmt(VK_FORMAT_ASTC_10x10_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X10_U8SRGB),
> - fmt(VK_FORMAT_ASTC_12x10_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X10_U8SRGB),
> - fmt(VK_FORMAT_ASTC_12x12_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X12_U8SRGB),
> - fmt(VK_FORMAT_ASTC_4x4_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_4X4_FLT16),
> - fmt(VK_FORMAT_ASTC_5x4_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X4_FLT16),
> - fmt(VK_FORMAT_ASTC_5x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X5_FLT16),
> - fmt(VK_FORMAT_ASTC_6x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X5_FLT16),
> - fmt(VK_FORMAT_ASTC_6x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X6_FLT16),
> - fmt(VK_FORMAT_ASTC_8x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X5_FLT16),
> - fmt(VK_FORMAT_ASTC_8x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X6_FLT16),
> - fmt(VK_FORMAT_ASTC_8x8_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X8_FLT16),
> - fmt(VK_FORMAT_ASTC_10x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X5_FLT16),
> - fmt(VK_FORMAT_ASTC_10x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X6_FLT16),
> - fmt(VK_FORMAT_ASTC_10x8_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X8_FLT16),
> - fmt(VK_FORMAT_ASTC_10x10_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X10_FLT16),
> - fmt(VK_FORMAT_ASTC_12x10_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X10_FLT16),
> - fmt(VK_FORMAT_ASTC_12x12_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X12_FLT16),
> - fmt(VK_FORMAT_B8G8R8_UNORM, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_SNORM, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_USCALED, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_SSCALED, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_UINT, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_SINT, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8_SRGB, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_UNORM, ISL_FORMAT_B8G8R8A8_UNORM),
> - fmt(VK_FORMAT_B8G8R8A8_SNORM, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_USCALED, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_SSCALED, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_UINT, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_SINT, ISL_FORMAT_UNSUPPORTED),
> - fmt(VK_FORMAT_B8G8R8A8_SRGB, ISL_FORMAT_B8G8R8A8_UNORM_SRGB),
> +static const struct anv_format main_formats[] = {
> + fmt1(VK_FORMAT_UNDEFINED, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_R4G4_UNORM_PACK8, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_R4G4B4A4_UNORM_PACK16, ISL_FORMAT_A4B4G4R4_UNORM),
> + swiz_fmt1(VK_FORMAT_B4G4R4A4_UNORM_PACK16, ISL_FORMAT_A4B4G4R4_UNORM, BGRA),
> + fmt1(VK_FORMAT_R5G6B5_UNORM_PACK16, ISL_FORMAT_B5G6R5_UNORM),
> + swiz_fmt1(VK_FORMAT_B5G6R5_UNORM_PACK16, ISL_FORMAT_B5G6R5_UNORM, BGRA),
> + fmt1(VK_FORMAT_R5G5B5A1_UNORM_PACK16, ISL_FORMAT_A1B5G5R5_UNORM),
> + fmt1(VK_FORMAT_B5G5R5A1_UNORM_PACK16, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_A1R5G5B5_UNORM_PACK16, ISL_FORMAT_B5G5R5A1_UNORM),
> + fmt1(VK_FORMAT_R8_UNORM, ISL_FORMAT_R8_UNORM),
> + fmt1(VK_FORMAT_R8_SNORM, ISL_FORMAT_R8_SNORM),
> + fmt1(VK_FORMAT_R8_USCALED, ISL_FORMAT_R8_USCALED),
> + fmt1(VK_FORMAT_R8_SSCALED, ISL_FORMAT_R8_SSCALED),
> + fmt1(VK_FORMAT_R8_UINT, ISL_FORMAT_R8_UINT),
> + fmt1(VK_FORMAT_R8_SINT, ISL_FORMAT_R8_SINT),
> + fmt1(VK_FORMAT_R8_SRGB, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_R8G8_UNORM, ISL_FORMAT_R8G8_UNORM),
> + fmt1(VK_FORMAT_R8G8_SNORM, ISL_FORMAT_R8G8_SNORM),
> + fmt1(VK_FORMAT_R8G8_USCALED, ISL_FORMAT_R8G8_USCALED),
> + fmt1(VK_FORMAT_R8G8_SSCALED, ISL_FORMAT_R8G8_SSCALED),
> + fmt1(VK_FORMAT_R8G8_UINT, ISL_FORMAT_R8G8_UINT),
> + fmt1(VK_FORMAT_R8G8_SINT, ISL_FORMAT_R8G8_SINT),
> + fmt1(VK_FORMAT_R8G8_SRGB, ISL_FORMAT_UNSUPPORTED), /* L8A8_UNORM_SRGB */
> + fmt1(VK_FORMAT_R8G8B8_UNORM, ISL_FORMAT_R8G8B8_UNORM),
> + fmt1(VK_FORMAT_R8G8B8_SNORM, ISL_FORMAT_R8G8B8_SNORM),
> + fmt1(VK_FORMAT_R8G8B8_USCALED, ISL_FORMAT_R8G8B8_USCALED),
> + fmt1(VK_FORMAT_R8G8B8_SSCALED, ISL_FORMAT_R8G8B8_SSCALED),
> + fmt1(VK_FORMAT_R8G8B8_UINT, ISL_FORMAT_R8G8B8_UINT),
> + fmt1(VK_FORMAT_R8G8B8_SINT, ISL_FORMAT_R8G8B8_SINT),
> + fmt1(VK_FORMAT_R8G8B8_SRGB, ISL_FORMAT_R8G8B8_UNORM_SRGB),
> + fmt1(VK_FORMAT_R8G8B8A8_UNORM, ISL_FORMAT_R8G8B8A8_UNORM),
> + fmt1(VK_FORMAT_R8G8B8A8_SNORM, ISL_FORMAT_R8G8B8A8_SNORM),
> + fmt1(VK_FORMAT_R8G8B8A8_USCALED, ISL_FORMAT_R8G8B8A8_USCALED),
> + fmt1(VK_FORMAT_R8G8B8A8_SSCALED, ISL_FORMAT_R8G8B8A8_SSCALED),
> + fmt1(VK_FORMAT_R8G8B8A8_UINT, ISL_FORMAT_R8G8B8A8_UINT),
> + fmt1(VK_FORMAT_R8G8B8A8_SINT, ISL_FORMAT_R8G8B8A8_SINT),
> + fmt1(VK_FORMAT_R8G8B8A8_SRGB, ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
> + fmt1(VK_FORMAT_A8B8G8R8_UNORM_PACK32, ISL_FORMAT_R8G8B8A8_UNORM),
> + fmt1(VK_FORMAT_A8B8G8R8_SNORM_PACK32, ISL_FORMAT_R8G8B8A8_SNORM),
> + fmt1(VK_FORMAT_A8B8G8R8_USCALED_PACK32, ISL_FORMAT_R8G8B8A8_USCALED),
> + fmt1(VK_FORMAT_A8B8G8R8_SSCALED_PACK32, ISL_FORMAT_R8G8B8A8_SSCALED),
> + fmt1(VK_FORMAT_A8B8G8R8_UINT_PACK32, ISL_FORMAT_R8G8B8A8_UINT),
> + fmt1(VK_FORMAT_A8B8G8R8_SINT_PACK32, ISL_FORMAT_R8G8B8A8_SINT),
> + fmt1(VK_FORMAT_A8B8G8R8_SRGB_PACK32, ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
> + fmt1(VK_FORMAT_A2R10G10B10_UNORM_PACK32, ISL_FORMAT_B10G10R10A2_UNORM),
> + fmt1(VK_FORMAT_A2R10G10B10_SNORM_PACK32, ISL_FORMAT_B10G10R10A2_SNORM),
> + fmt1(VK_FORMAT_A2R10G10B10_USCALED_PACK32, ISL_FORMAT_B10G10R10A2_USCALED),
> + fmt1(VK_FORMAT_A2R10G10B10_SSCALED_PACK32, ISL_FORMAT_B10G10R10A2_SSCALED),
> + fmt1(VK_FORMAT_A2R10G10B10_UINT_PACK32, ISL_FORMAT_B10G10R10A2_UINT),
> + fmt1(VK_FORMAT_A2R10G10B10_SINT_PACK32, ISL_FORMAT_B10G10R10A2_SINT),
> + fmt1(VK_FORMAT_A2B10G10R10_UNORM_PACK32, ISL_FORMAT_R10G10B10A2_UNORM),
> + fmt1(VK_FORMAT_A2B10G10R10_SNORM_PACK32, ISL_FORMAT_R10G10B10A2_SNORM),
> + fmt1(VK_FORMAT_A2B10G10R10_USCALED_PACK32, ISL_FORMAT_R10G10B10A2_USCALED),
> + fmt1(VK_FORMAT_A2B10G10R10_SSCALED_PACK32, ISL_FORMAT_R10G10B10A2_SSCALED),
> + fmt1(VK_FORMAT_A2B10G10R10_UINT_PACK32, ISL_FORMAT_R10G10B10A2_UINT),
> + fmt1(VK_FORMAT_A2B10G10R10_SINT_PACK32, ISL_FORMAT_R10G10B10A2_SINT),
> + fmt1(VK_FORMAT_R16_UNORM, ISL_FORMAT_R16_UNORM),
> + fmt1(VK_FORMAT_R16_SNORM, ISL_FORMAT_R16_SNORM),
> + fmt1(VK_FORMAT_R16_USCALED, ISL_FORMAT_R16_USCALED),
> + fmt1(VK_FORMAT_R16_SSCALED, ISL_FORMAT_R16_SSCALED),
> + fmt1(VK_FORMAT_R16_UINT, ISL_FORMAT_R16_UINT),
> + fmt1(VK_FORMAT_R16_SINT, ISL_FORMAT_R16_SINT),
> + fmt1(VK_FORMAT_R16_SFLOAT, ISL_FORMAT_R16_FLOAT),
> + fmt1(VK_FORMAT_R16G16_UNORM, ISL_FORMAT_R16G16_UNORM),
> + fmt1(VK_FORMAT_R16G16_SNORM, ISL_FORMAT_R16G16_SNORM),
> + fmt1(VK_FORMAT_R16G16_USCALED, ISL_FORMAT_R16G16_USCALED),
> + fmt1(VK_FORMAT_R16G16_SSCALED, ISL_FORMAT_R16G16_SSCALED),
> + fmt1(VK_FORMAT_R16G16_UINT, ISL_FORMAT_R16G16_UINT),
> + fmt1(VK_FORMAT_R16G16_SINT, ISL_FORMAT_R16G16_SINT),
> + fmt1(VK_FORMAT_R16G16_SFLOAT, ISL_FORMAT_R16G16_FLOAT),
> + fmt1(VK_FORMAT_R16G16B16_UNORM, ISL_FORMAT_R16G16B16_UNORM),
> + fmt1(VK_FORMAT_R16G16B16_SNORM, ISL_FORMAT_R16G16B16_SNORM),
> + fmt1(VK_FORMAT_R16G16B16_USCALED, ISL_FORMAT_R16G16B16_USCALED),
> + fmt1(VK_FORMAT_R16G16B16_SSCALED, ISL_FORMAT_R16G16B16_SSCALED),
> + fmt1(VK_FORMAT_R16G16B16_UINT, ISL_FORMAT_R16G16B16_UINT),
> + fmt1(VK_FORMAT_R16G16B16_SINT, ISL_FORMAT_R16G16B16_SINT),
> + fmt1(VK_FORMAT_R16G16B16_SFLOAT, ISL_FORMAT_R16G16B16_FLOAT),
> + fmt1(VK_FORMAT_R16G16B16A16_UNORM, ISL_FORMAT_R16G16B16A16_UNORM),
> + fmt1(VK_FORMAT_R16G16B16A16_SNORM, ISL_FORMAT_R16G16B16A16_SNORM),
> + fmt1(VK_FORMAT_R16G16B16A16_USCALED, ISL_FORMAT_R16G16B16A16_USCALED),
> + fmt1(VK_FORMAT_R16G16B16A16_SSCALED, ISL_FORMAT_R16G16B16A16_SSCALED),
> + fmt1(VK_FORMAT_R16G16B16A16_UINT, ISL_FORMAT_R16G16B16A16_UINT),
> + fmt1(VK_FORMAT_R16G16B16A16_SINT, ISL_FORMAT_R16G16B16A16_SINT),
> + fmt1(VK_FORMAT_R16G16B16A16_SFLOAT, ISL_FORMAT_R16G16B16A16_FLOAT),
> + fmt1(VK_FORMAT_R32_UINT, ISL_FORMAT_R32_UINT),
> + fmt1(VK_FORMAT_R32_SINT, ISL_FORMAT_R32_SINT),
> + fmt1(VK_FORMAT_R32_SFLOAT, ISL_FORMAT_R32_FLOAT),
> + fmt1(VK_FORMAT_R32G32_UINT, ISL_FORMAT_R32G32_UINT),
> + fmt1(VK_FORMAT_R32G32_SINT, ISL_FORMAT_R32G32_SINT),
> + fmt1(VK_FORMAT_R32G32_SFLOAT, ISL_FORMAT_R32G32_FLOAT),
> + fmt1(VK_FORMAT_R32G32B32_UINT, ISL_FORMAT_R32G32B32_UINT),
> + fmt1(VK_FORMAT_R32G32B32_SINT, ISL_FORMAT_R32G32B32_SINT),
> + fmt1(VK_FORMAT_R32G32B32_SFLOAT, ISL_FORMAT_R32G32B32_FLOAT),
> + fmt1(VK_FORMAT_R32G32B32A32_UINT, ISL_FORMAT_R32G32B32A32_UINT),
> + fmt1(VK_FORMAT_R32G32B32A32_SINT, ISL_FORMAT_R32G32B32A32_SINT),
> + fmt1(VK_FORMAT_R32G32B32A32_SFLOAT, ISL_FORMAT_R32G32B32A32_FLOAT),
> + fmt1(VK_FORMAT_R64_UINT, ISL_FORMAT_R64_PASSTHRU),
> + fmt1(VK_FORMAT_R64_SINT, ISL_FORMAT_R64_PASSTHRU),
> + fmt1(VK_FORMAT_R64_SFLOAT, ISL_FORMAT_R64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64_UINT, ISL_FORMAT_R64G64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64_SINT, ISL_FORMAT_R64G64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64_SFLOAT, ISL_FORMAT_R64G64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64_UINT, ISL_FORMAT_R64G64B64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64_SINT, ISL_FORMAT_R64G64B64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64_SFLOAT, ISL_FORMAT_R64G64B64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64A64_UINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64A64_SINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> + fmt1(VK_FORMAT_R64G64B64A64_SFLOAT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
> + fmt1(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT),
> + fmt1(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, ISL_FORMAT_R9G9B9E5_SHAREDEXP),
> +
> + fmt1(VK_FORMAT_D16_UNORM, ISL_FORMAT_R16_UNORM),
> + fmt1(VK_FORMAT_X8_D24_UNORM_PACK32, ISL_FORMAT_R24_UNORM_X8_TYPELESS),
> + fmt1(VK_FORMAT_D32_SFLOAT, ISL_FORMAT_R32_FLOAT),
> + fmt1(VK_FORMAT_S8_UINT, ISL_FORMAT_R8_UINT),
> + fmt1(VK_FORMAT_D16_UNORM_S8_UINT, ISL_FORMAT_UNSUPPORTED),
> + ds_fmt(VK_FORMAT_D24_UNORM_S8_UINT, ISL_FORMAT_R24_UNORM_X8_TYPELESS, ISL_FORMAT_R8_UINT),
> + ds_fmt(VK_FORMAT_D32_SFLOAT_S8_UINT, ISL_FORMAT_R32_FLOAT, ISL_FORMAT_R8_UINT),
> +
> + swiz_fmt1(VK_FORMAT_BC1_RGB_UNORM_BLOCK, ISL_FORMAT_BC1_UNORM, RGB1),
> + swiz_fmt1(VK_FORMAT_BC1_RGB_SRGB_BLOCK, ISL_FORMAT_BC1_UNORM_SRGB, RGB1),
> + fmt1(VK_FORMAT_BC1_RGBA_UNORM_BLOCK, ISL_FORMAT_BC1_UNORM),
> + fmt1(VK_FORMAT_BC1_RGBA_SRGB_BLOCK, ISL_FORMAT_BC1_UNORM_SRGB),
> + fmt1(VK_FORMAT_BC2_UNORM_BLOCK, ISL_FORMAT_BC2_UNORM),
> + fmt1(VK_FORMAT_BC2_SRGB_BLOCK, ISL_FORMAT_BC2_UNORM_SRGB),
> + fmt1(VK_FORMAT_BC3_UNORM_BLOCK, ISL_FORMAT_BC3_UNORM),
> + fmt1(VK_FORMAT_BC3_SRGB_BLOCK, ISL_FORMAT_BC3_UNORM_SRGB),
> + fmt1(VK_FORMAT_BC4_UNORM_BLOCK, ISL_FORMAT_BC4_UNORM),
> + fmt1(VK_FORMAT_BC4_SNORM_BLOCK, ISL_FORMAT_BC4_SNORM),
> + fmt1(VK_FORMAT_BC5_UNORM_BLOCK, ISL_FORMAT_BC5_UNORM),
> + fmt1(VK_FORMAT_BC5_SNORM_BLOCK, ISL_FORMAT_BC5_SNORM),
> + fmt1(VK_FORMAT_BC6H_UFLOAT_BLOCK, ISL_FORMAT_BC6H_UF16),
> + fmt1(VK_FORMAT_BC6H_SFLOAT_BLOCK, ISL_FORMAT_BC6H_SF16),
> + fmt1(VK_FORMAT_BC7_UNORM_BLOCK, ISL_FORMAT_BC7_UNORM),
> + fmt1(VK_FORMAT_BC7_SRGB_BLOCK, ISL_FORMAT_BC7_UNORM_SRGB),
> + fmt1(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8),
> + fmt1(VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, ISL_FORMAT_ETC2_SRGB8),
> + fmt1(VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8_PTA),
> + fmt1(VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, ISL_FORMAT_ETC2_SRGB8_PTA),
> + fmt1(VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, ISL_FORMAT_ETC2_EAC_RGBA8),
> + fmt1(VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, ISL_FORMAT_ETC2_EAC_SRGB8_A8),
> + fmt1(VK_FORMAT_EAC_R11_UNORM_BLOCK, ISL_FORMAT_EAC_R11),
> + fmt1(VK_FORMAT_EAC_R11_SNORM_BLOCK, ISL_FORMAT_EAC_SIGNED_R11),
> + fmt1(VK_FORMAT_EAC_R11G11_UNORM_BLOCK, ISL_FORMAT_EAC_RG11),
> + fmt1(VK_FORMAT_EAC_R11G11_SNORM_BLOCK, ISL_FORMAT_EAC_SIGNED_RG11),
> + fmt1(VK_FORMAT_ASTC_4x4_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_4X4_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_5x4_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X4_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_5x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X5_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_6x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X5_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_6x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X6_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_8x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X5_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_8x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X6_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_8x8_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X8_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_10x5_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X5_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_10x6_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X6_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_10x8_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X8_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_10x10_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X10_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_12x10_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X10_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_12x12_SRGB_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X12_U8SRGB),
> + fmt1(VK_FORMAT_ASTC_4x4_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_4X4_FLT16),
> + fmt1(VK_FORMAT_ASTC_5x4_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X4_FLT16),
> + fmt1(VK_FORMAT_ASTC_5x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_5X5_FLT16),
> + fmt1(VK_FORMAT_ASTC_6x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X5_FLT16),
> + fmt1(VK_FORMAT_ASTC_6x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_6X6_FLT16),
> + fmt1(VK_FORMAT_ASTC_8x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X5_FLT16),
> + fmt1(VK_FORMAT_ASTC_8x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X6_FLT16),
> + fmt1(VK_FORMAT_ASTC_8x8_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_8X8_FLT16),
> + fmt1(VK_FORMAT_ASTC_10x5_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X5_FLT16),
> + fmt1(VK_FORMAT_ASTC_10x6_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X6_FLT16),
> + fmt1(VK_FORMAT_ASTC_10x8_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X8_FLT16),
> + fmt1(VK_FORMAT_ASTC_10x10_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_10X10_FLT16),
> + fmt1(VK_FORMAT_ASTC_12x10_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X10_FLT16),
> + fmt1(VK_FORMAT_ASTC_12x12_UNORM_BLOCK, ISL_FORMAT_ASTC_LDR_2D_12X12_FLT16),
> + fmt1(VK_FORMAT_B8G8R8_UNORM, ISL_FORMAT_UNSUPPORTED),
>
While we're at it, maybe add a fmt_unsupported macro.
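Untested sketch; since anv_get_format() below already returns NULL when
n_planes == 0, it could be as simple as:

   #define fmt_unsupported(__vk_fmt) \
      [vk_enum_offset(__vk_fmt)] = { \
         .n_planes = 0, \
      }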
> + fmt1(VK_FORMAT_B8G8R8_SNORM, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8_USCALED, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8_SSCALED, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8_UINT, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8_SINT, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8_SRGB, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_UNORM, ISL_FORMAT_B8G8R8A8_UNORM),
> + fmt1(VK_FORMAT_B8G8R8A8_SNORM, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_USCALED, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_SSCALED, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_UINT, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_SINT, ISL_FORMAT_UNSUPPORTED),
> + fmt1(VK_FORMAT_B8G8R8A8_SRGB, ISL_FORMAT_B8G8R8A8_UNORM_SRGB),
> +};
> +
> +static const struct {
> + const struct anv_format *formats;
> + uint32_t n_formats;
> +} anv_formats[] = {
> + [0] = { .formats = main_formats, .n_formats = ARRAY_SIZE(main_formats), },
> };
>
> +#undef _fmt
> +#undef swiz_fmt1
> +#undef fmt1
> #undef fmt
>
> -static bool
> -format_supported(VkFormat vk_format)
> +const struct anv_format *
> +anv_get_format(VkFormat vk_format)
> {
> - if (vk_format >= ARRAY_SIZE(anv_formats))
> - return false;
> + uint32_t enum_offset = vk_enum_offset(vk_format);
> + uint32_t ext_number = vk_enum_extension(vk_format);
>
> - return anv_formats[vk_format].isl_format != ISL_FORMAT_UNSUPPORTED;
> + if (ext_number >= ARRAY_SIZE(anv_formats) ||
> + enum_offset >= anv_formats[ext_number].n_formats ||
> + anv_formats[ext_number].formats[enum_offset].n_planes == 0 ||
> + anv_formats[ext_number].formats[enum_offset].planes[0].isl_format == ISL_FORMAT_UNSUPPORTED)
> + return NULL;
> +
> + return &anv_formats[ext_number].formats[enum_offset];
> }
>
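(Side note for readers: vk_enum_offset()/vk_enum_extension() aren't shown in
this patch. Assuming they follow the standard Vulkan extension enum encoding,
where extension N's enums start at 1000000000 + (N - 1) * 1000, they would
look roughly like:

   static inline uint32_t
   vk_enum_extension(uint32_t enum_value)
   {
      return enum_value >= 1000000000 ?
             ((enum_value % 1000000000) / 1000) + 1 : 0;
   }

   static inline uint32_t
   vk_enum_offset(uint32_t enum_value)
   {
      return enum_value >= 1000000000 ? (enum_value % 1000) : enum_value;
   }

so all core formats land in anv_formats[0], i.e. main_formats.)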
> /**
> * Exactly one bit must be set in \a aspect.
> */
> -struct anv_format
> -anv_get_format(const struct gen_device_info *devinfo, VkFormat vk_format,
> - VkImageAspectFlags aspect, VkImageTiling tiling)
> +struct anv_format_plane
> +anv_get_plane_format(const struct gen_device_info *devinfo, VkFormat vk_format,
> + VkImageAspectFlags aspect, VkImageTiling tiling)
> {
> - if (!format_supported(vk_format))
> - return anv_formats[VK_FORMAT_UNDEFINED];
> + const struct anv_format *format = anv_get_format(vk_format);
> + struct anv_format_plane plane_format = {
> + .isl_format = ISL_FORMAT_UNSUPPORTED,
> + };
>
> - struct anv_format format = anv_formats[vk_format];
> + if (format == NULL)
> + return plane_format;
>
> - if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
> - assert(vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_STENCIL_BIT);
> - format.isl_format = ISL_FORMAT_R8_UINT;
> - return format;
> - }
> + uint32_t plane = anv_image_aspect_to_plane(vk_format_aspects(vk_format), aspect);
> + plane_format = format->planes[plane];
> + if (plane_format.isl_format == ISL_FORMAT_UNSUPPORTED)
>
I don't think we'll ever hit this case in practice: anv_get_format will
return NULL if plane 0 is ISL_FORMAT_UNSUPPORTED, and if you're asking for a
higher plane than exists, then I think you're using this helper wrong.
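I'd rather make it an assertion, something like:

   uint32_t plane = anv_image_aspect_to_plane(vk_format_aspects(vk_format),
                                              aspect);
   assert(plane < format->n_planes);
   plane_format = format->planes[plane];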
> + return plane_format;
>
> - if (aspect & VK_IMAGE_ASPECT_DEPTH_BIT) {
> - assert(vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT);
> - return format;
> + if (aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
> + assert(vk_format_aspects(vk_format) &
> + (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
> + return plane_format;
> }
>
> assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
> assert(vk_format_aspects(vk_format) == VK_IMAGE_ASPECT_COLOR_BIT);
>
> const struct isl_format_layout *isl_layout =
> - isl_format_get_layout(format.isl_format);
> + isl_format_get_layout(plane_format.isl_format);
>
> if (tiling == VK_IMAGE_TILING_OPTIMAL &&
> !util_is_power_of_two(isl_layout->bpb)) {
> @@ -295,13 +333,14 @@ anv_get_format(const struct gen_device_info *devinfo, VkFormat vk_format,
> * this by switching them over to RGBX or RGBA formats under the
> * hood.
> */
> - enum isl_format rgbx = isl_format_rgb_to_rgbx(format.isl_format);
> + enum isl_format rgbx = isl_format_rgb_to_rgbx(plane_format.isl_format);
> if (rgbx != ISL_FORMAT_UNSUPPORTED &&
> isl_format_supports_rendering(devinfo, rgbx)) {
> - format.isl_format = rgbx;
> + plane_format.isl_format = rgbx;
> } else {
> - format.isl_format = isl_format_rgb_to_rgba(format.isl_format);
> - format.swizzle = ISL_SWIZZLE(RED, GREEN, BLUE, ONE);
> + plane_format.isl_format =
> + isl_format_rgb_to_rgba(plane_format.isl_format);
> + plane_format.swizzle = ISL_SWIZZLE(RED, GREEN, BLUE, ONE);
> }
> }
>
> @@ -309,20 +348,18 @@ anv_get_format(const struct gen_device_info *devinfo, VkFormat vk_format,
> * back to a format with a more complex swizzle.
> */
> if (vk_format == VK_FORMAT_B4G4R4A4_UNORM_PACK16 && devinfo->gen < 8) {
> - return (struct anv_format) {
> - .isl_format = ISL_FORMAT_B4G4R4A4_UNORM,
> - .swizzle = ISL_SWIZZLE(GREEN, RED, ALPHA, BLUE),
> - };
> + plane_format.isl_format = ISL_FORMAT_B4G4R4A4_UNORM;
> + plane_format.swizzle = ISL_SWIZZLE(GREEN, RED, ALPHA, BLUE);
>
This whole function is a mess but that's not your fault. I may clean some
of this up after we land the extension.
> }
>
> - return format;
> + return plane_format;
> }
>
> // Format capabilities
>
> static VkFormatFeatureFlags
> get_image_format_properties(const struct gen_device_info *devinfo,
> - enum isl_format base, struct anv_format format)
> + enum isl_format base, struct anv_format_plane format)
> {
> if (format.isl_format == ISL_FORMAT_UNSUPPORTED)
> return 0;
> @@ -392,19 +429,20 @@ get_buffer_format_properties(const struct gen_device_info *devinfo,
>
> static void
> anv_physical_device_get_format_properties(struct anv_physical_device *physical_device,
> - VkFormat format,
> + VkFormat vk_format,
> VkFormatProperties *out_properties)
> {
> int gen = physical_device->info.gen * 10;
> if (physical_device->info.is_haswell)
> gen += 5;
>
> + const struct anv_format *format = anv_get_format(vk_format);
> VkFormatFeatureFlags linear = 0, tiled = 0, buffer = 0;
> - if (!format_supported(format)) {
> + if (format == NULL) {
> /* Nothing to do here */
> - } else if (vk_format_is_depth_or_stencil(format)) {
> + } else if (vk_format_is_depth_or_stencil(vk_format)) {
> tiled |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
> - if (vk_format_aspects(format) == VK_IMAGE_ASPECT_DEPTH_BIT ||
> + if (vk_format_aspects(vk_format) == VK_IMAGE_ASPECT_DEPTH_BIT ||
> physical_device->info.gen >= 8)
> tiled |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
>
> @@ -413,13 +451,13 @@ anv_physical_device_get_format_properties(struct anv_physical_device *physical_d
> VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR |
> VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR;
> } else {
> - struct anv_format linear_fmt, tiled_fmt;
> - linear_fmt = anv_get_format(&physical_device->info, format,
> - VK_IMAGE_ASPECT_COLOR_BIT,
> - VK_IMAGE_TILING_LINEAR);
> - tiled_fmt = anv_get_format(&physical_device->info, format,
> - VK_IMAGE_ASPECT_COLOR_BIT,
> - VK_IMAGE_TILING_OPTIMAL);
> + struct anv_format_plane linear_fmt, tiled_fmt;
> + linear_fmt = anv_get_plane_format(&physical_device->info, vk_format,
> + VK_IMAGE_ASPECT_COLOR_BIT,
> + VK_IMAGE_TILING_LINEAR);
> + tiled_fmt = anv_get_plane_format(&physical_device->info, vk_format,
> + VK_IMAGE_ASPECT_COLOR_BIT,
> + VK_IMAGE_TILING_OPTIMAL);
>
> linear = get_image_format_properties(&physical_device->info,
> linear_fmt.isl_format,
> linear_fmt);
> @@ -497,8 +535,9 @@ anv_get_image_format_properties(
> uint32_t maxMipLevels;
> uint32_t maxArraySize;
> VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
> + const struct anv_format *format = anv_get_format(info->format);
>
> - if (!format_supported(info->format))
> + if (format == NULL)
> goto unsupported;
>
> anv_physical_device_get_format_properties(physical_device,
> info->format,
> @@ -552,7 +591,7 @@ anv_get_image_format_properties(
> * * This field cannot be ASTC format if the Surface Type is SURFTYPE_1D.
> */
> if (info->type == VK_IMAGE_TYPE_1D &&
> - isl_format_is_compressed(anv_formats[info->format].isl_format)) {
> + isl_format_is_compressed(format->planes[0].isl_format)) {
> goto unsupported;
> }
>
> diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
> index 2aa83154f82..905d7d087cc 100644
> --- a/src/intel/vulkan/anv_image.c
> +++ b/src/intel/vulkan/anv_image.c
> @@ -231,8 +231,9 @@ make_surface(const struct anv_device *dev,
> image->extent = anv_sanitize_image_extent(vk_info->imageType,
> vk_info->extent);
>
> - enum isl_format format = anv_get_isl_format(&dev->info, vk_info->format,
> - aspect, vk_info->tiling);
> + enum isl_format format =
> + anv_get_isl_plane_format(&dev->info, vk_info->format,
> + aspect, vk_info->tiling);
> assert(format != ISL_FORMAT_UNSUPPORTED);
>
> ok = isl_surf_init(&dev->isl_dev, &anv_surf->isl,
> @@ -734,8 +735,9 @@ anv_CreateImageView(VkDevice _device,
> iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
> iview->vk_format = pCreateInfo->format;
>
> - struct anv_format format = anv_get_format(&device->info, pCreateInfo->format,
> - range->aspectMask, image->tiling);
> + struct anv_format_plane format =
> + anv_get_plane_format(&device->info, pCreateInfo->format,
> + range->aspectMask, image->tiling);
>
> iview->isl = (struct isl_view) {
> .format = format.isl_format,
> @@ -931,9 +933,9 @@ anv_CreateBufferView(VkDevice _device,
>
> /* TODO: Handle the format swizzle? */
>
> - view->format = anv_get_isl_format(&device->info, pCreateInfo->format,
> - VK_IMAGE_ASPECT_COLOR_BIT,
> - VK_IMAGE_TILING_LINEAR);
> + view->format = anv_get_isl_plane_format(&device->info, pCreateInfo->format,
> + VK_IMAGE_ASPECT_COLOR_BIT,
> + VK_IMAGE_TILING_LINEAR);
> const uint32_t format_bs = isl_format_get_layout(view->format)->bpb / 8;
> view->bo = buffer->bo;
> view->offset = buffer->offset + pCreateInfo->offset;
> diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
> index df8de8d9407..fde562294c0 100644
> --- a/src/intel/vulkan/anv_private.h
> +++ b/src/intel/vulkan/anv_private.h
> @@ -2159,20 +2159,60 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
> const char *entrypoint,
> const VkSpecializationInfo *spec_info);
>
> -struct anv_format {
> +struct anv_format_plane {
> enum isl_format isl_format:16;
> struct isl_swizzle swizzle;
> };
>
> -struct anv_format
> -anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
> - VkImageAspectFlags aspect, VkImageTiling tiling);
> +
> +struct anv_format {
> + struct anv_format_plane planes[3];
> + uint8_t n_planes;
> +};
> +
> +static inline uint32_t
> +anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
> + VkImageAspectFlags aspect_mask)
> +{
> + switch (aspect_mask) {
> + case VK_IMAGE_ASPECT_COLOR_BIT:
> + case VK_IMAGE_ASPECT_DEPTH_BIT:
> + case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
> + return 0;
> + case VK_IMAGE_ASPECT_STENCIL_BIT:
> + if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
> + return 0;
> + /* Fall-through */
> + case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
> + return 1;
> + case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
> + return 2;
> + default:
> + unreachable("invalid image aspect");
> + }
> +}
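To spell out the stencil special case for readers: with a combined
depth/stencil format the stencil data lives in plane 1, but with a
stencil-only format it is plane 0:

   /* VK_FORMAT_D24_UNORM_S8_UINT: aspects == DEPTH | STENCIL */
   anv_image_aspect_to_plane(aspects, VK_IMAGE_ASPECT_DEPTH_BIT);   /* -> 0 */
   anv_image_aspect_to_plane(aspects, VK_IMAGE_ASPECT_STENCIL_BIT); /* -> 1 */

   /* VK_FORMAT_S8_UINT: aspects == STENCIL only */
   anv_image_aspect_to_plane(VK_IMAGE_ASPECT_STENCIL_BIT,
                             VK_IMAGE_ASPECT_STENCIL_BIT);          /* -> 0 */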
> +
> +const struct anv_format *
> +anv_get_format(VkFormat format);
> +
> +static inline uint32_t
> +anv_get_format_planes(VkFormat vk_format)
>
get_format_num_planes?
> +{
> + const struct anv_format *format = anv_get_format(vk_format);
> +
> + return format != NULL ? format->n_planes : 0;
> +}
> +
> +struct anv_format_plane
> +anv_get_plane_format(const struct gen_device_info *devinfo, VkFormat vk_format,
>
get_format_plane?
Also, it's a bit weird to me that
> + VkImageAspectFlags aspect, VkImageTiling tiling);
>
> static inline enum isl_format
> -anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
> - VkImageAspectFlags aspect, VkImageTiling tiling)
> +anv_get_isl_plane_format(const struct gen_device_info *devinfo,
>
Does this really need to be renamed? We're not changing the parameters or
return type as it already takes an aspect. Not renaming it would reduce
the churn quite a bit.
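i.e. just keep:

   static inline enum isl_format
   anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                      VkImageAspectFlags aspect, VkImageTiling tiling)
   {
      return anv_get_plane_format(devinfo, vk_format, aspect, tiling).isl_format;
   }

and only change the body.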
> + VkFormat vk_format, VkImageAspectFlags aspect,
> + VkImageTiling tiling)
> {
> - return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
> + return anv_get_plane_format(devinfo, vk_format, aspect, tiling).isl_format;
> }
>
> static inline struct isl_swizzle
> diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
> index 6dfa49b8737..5350f54b39e 100644
> --- a/src/intel/vulkan/genX_pipeline.c
> +++ b/src/intel/vulkan/genX_pipeline.c
> @@ -119,10 +119,10 @@ emit_vertex_input(struct anv_pipeline *pipeline,
> for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
> const VkVertexInputAttributeDescription *desc =
> &info->pVertexAttributeDescriptions[i];
> - enum isl_format format = anv_get_isl_format(&pipeline->device->info,
> - desc->format,
> - VK_IMAGE_ASPECT_COLOR_BIT,
> - VK_IMAGE_TILING_LINEAR);
> + enum isl_format format =
> + anv_get_isl_plane_format(&pipeline->device->info, desc->format,
> + VK_IMAGE_ASPECT_COLOR_BIT,
> + VK_IMAGE_TILING_LINEAR);
>
> assert(desc->binding < MAX_VBS);
>
> @@ -504,9 +504,9 @@ emit_rs_state(struct anv_pipeline *pipeline,
> assert(vk_format_is_depth_or_stencil(vk_format));
> if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
> enum isl_format isl_format =
> - anv_get_isl_format(&pipeline->device->info, vk_format,
> - VK_IMAGE_ASPECT_DEPTH_BIT,
> - VK_IMAGE_TILING_OPTIMAL);
> + anv_get_isl_plane_format(&pipeline->device->info, vk_format,
> + VK_IMAGE_ASPECT_DEPTH_BIT,
> + VK_IMAGE_TILING_OPTIMAL);
> sf.DepthBufferSurfaceFormat =
> isl_format_get_depth_format(isl_format, false);
> }
> --
> 2.14.1
>