[Intel-gfx] [PATCH 4/6] drm/i915/chv: Implement color management

Daniel Stone daniel at fooishbar.org
Fri Dec 18 08:53:44 PST 2015


Hi,

On 17 December 2015 at 18:57, Lionel Landwerlin
<lionel.g.landwerlin at intel.com> wrote:
> @@ -311,7 +312,9 @@ static const struct intel_device_info intel_cherryview_info = {
>         .gen = 8, .num_pipes = 3,
>         .need_gfx_hws = 1, .has_hotplug = 1,
>         .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
> -       .is_cherryview = 1,
> +       .is_valleyview = 1,

... ?!

> +       .num_samples_after_ctm = CHV_10BIT_GAMMA_MAX_VALS,
> +       .num_samples_before_ctm = CHV_DEGAMMA_MAX_VALS,

Switch the order of these please.

> +static u16 chv_prepare_csc_coeff(s64 csc_coeff)
> +{
> +       u16 csc_s3_12_format = 0;
> +       u16 csc_int_value;
> +       u16 csc_fract_value;
> +
> +       if (csc_coeff < 0)
> +               csc_s3_12_format = CSC_COEFF_SIGN;
> +       csc_coeff = abs(csc_coeff);
> +       csc_coeff += CHV_CSC_FRACT_ROUNDOFF;
> +       if (csc_coeff > CHV_CSC_COEFF_MAX + 1)
> +               csc_coeff = CHV_CSC_COEFF_MAX + 1;
> +       else if (csc_coeff > CHV_CSC_COEFF_MAX)
> +               csc_coeff = CHV_CSC_COEFF_MAX;
> +
> +       csc_int_value = csc_coeff >> CHV_CSC_COEFF_SHIFT;
> +       csc_int_value <<= CHV_CSC_COEFF_INT_SHIFT;
> +
> +       csc_fract_value = csc_coeff >> CHV_CSC_COEFF_FRACT_SHIFT;
> +
> +       csc_s3_12_format |= csc_int_value | csc_fract_value;

Um. I'm at a loss trying to disentangle these. Here, (1 << 15) /
0x8000 is used as the sign bit; fair enough.

But, anything above 0x7ffffffff generates 0x8000 as a result, which
s3.12 would lead me to read as -7.999...; 0x7ffffffff itself generates
+7.999.... So huge positive numbers wrap around into your minimum
signed value. Is this deliberate? Is it covered by IGT?

> +static int chv_set_csc(struct drm_device *dev, struct drm_property_blob *blob,
> +               struct drm_crtc *crtc)
> +{
> +       u16 temp;
> +       u32 word = 0;
> +       int count = 0;
> +       i915_reg_t val;
> +       struct drm_ctm *csc_data;
> +       struct drm_i915_private *dev_priv = dev->dev_private;
> +       enum pipe pipe;
> +
> +       if (WARN_ON(!blob))
> +               return -EINVAL;

Why not let blob == NULL just disable CSC here? (How do you even disable it?)

> +       if (blob->length != sizeof(struct drm_ctm)) {
> +               DRM_ERROR("Invalid length of data received\n");
> +               return -EINVAL;
> +       }

This needs to be checked earlier (in the atomic_set_property hook):
failing commit is not OK.

> +static int chv_set_degamma(struct drm_device *dev,
> +       struct drm_property_blob *blob, struct drm_crtc *crtc)

What I said about the documentation around post-CTM vs. degamma makes
even more sense now. ;)

> +{
> +       u16 red_fract, green_fract, blue_fract;
> +       u32 red, green, blue;
> +       u32 num_samples;
> +       u32 word = 0;
> +       u32 count, cgm_control, cgm_degamma_reg;
> +       i915_reg_t val;
> +       enum pipe pipe;
> +       struct drm_palette *degamma_data;
> +       struct drm_i915_private *dev_priv = dev->dev_private;
> +       struct drm_r32g32b32 *correction_values = NULL;
> +       struct drm_crtc_state *state = crtc->state;
> +
> +       if (WARN_ON(!blob))
> +               return -EINVAL;

Same comment as above.

> +       degamma_data = (struct drm_palette *)blob->data;
> +       pipe = to_intel_crtc(crtc)->pipe;
> +       num_samples = blob->length / sizeof(struct drm_r32g32b32);
> +
> +       switch (num_samples) {
> +       case GAMMA_DISABLE_VALS:
> +               /* Disable DeGamma functionality on Pipe - CGM Block */

Just use blob == NULL for this.

> +               val = _MMIO(_PIPE_CGM_CONTROL(pipe));
> +               cgm_control = I915_READ(val);
> +               cgm_control &= ~CGM_DEGAMMA_EN;
> +               state->palette_before_ctm_blob = NULL;

This is really broken. In order to disable, you create a blob with len
== 0, which instantly drops itself without unreferencing, i.e. no
leak.

Also, I thought RMW of registers was frowned upon? Maybe not.

> +       default:
> +               DRM_ERROR("Invalid number of samples for DeGamma LUT\n");
> +               return -EINVAL;
> +       }

This could just become another early-exit path. But again, this needs
to be checked in, well, atomic_check, not commit.

> +static int chv_set_gamma(struct drm_device *dev, struct drm_property_blob *blob,
> +               struct drm_crtc *crtc)
> +{
> +       enum pipe pipe;
> +       u16 red_fract, green_fract, blue_fract;
> +       u32 red, green, blue, num_samples;
> +       u32 word = 0;
> +       u32 count, cgm_gamma_reg, cgm_control;
> +       i915_reg_t val;
> +       struct drm_r32g32b32 *correction_values;
> +       struct drm_palette *gamma_data;
> +       struct drm_i915_private *dev_priv = dev->dev_private;
> +       struct drm_crtc_state *state = crtc->state;
> +
> +       if (WARN_ON(!blob))
> +               return -EINVAL;

As above.

> +       switch (num_samples) {
> +       case GAMMA_DISABLE_VALS:

Ditto.

> +               /* Disable Gamma functionality on Pipe - CGM Block */
> +               val = _MMIO(_PIPE_CGM_CONTROL(pipe));
> +               cgm_control = I915_READ(val);
> +               cgm_control &= ~CGM_GAMMA_EN;
> +               I915_WRITE(val, cgm_control);
> +               state->palette_after_ctm_blob = NULL;

AOL.

> +       case CHV_8BIT_GAMMA_MAX_VALS:
> +       case CHV_10BIT_GAMMA_MAX_VALS:

Hm. At no point does the following code account for the difference
between these two. What happens if you set once with
10BIT_GAMMA_MAX_VALS followed by once with 8BIT_MAX_VALS? Won't that
break expectation of the second user, because you never wrote to the
257th entry?

Also, this just reads 10 bits out of the values regardless. Has 8-bit
been tested on CHV? It seems like it'd be totally broken, as this just
assumes a 10-bit range regardless.

> +       default:
> +               DRM_ERROR("Invalid number of samples (%u) for Gamma LUT\n",
> +                               num_samples);
> +               return -EINVAL;
> +       }

See previous.

> +void intel_color_manager_commit(struct drm_device *dev,
> +                               struct drm_crtc_state *crtc_state)
> +{
> +       struct drm_property_blob *blob;
> +       struct drm_crtc *crtc = crtc_state->crtc;
> +       int ret = -EINVAL;

Why is this initialised, especially when not actually used as a return value?

> +       blob = crtc_state->palette_after_ctm_blob;
> +       if (blob) {

Drop the conditional and pass it through regardless, so you can use
NULL to disable.

> +               /* Gamma correction is platform specific */
> +               if (IS_CHERRYVIEW(dev))
> +                       ret = chv_set_gamma(dev, blob, crtc);
> +
> +               if (ret)
> +                       DRM_ERROR("set Gamma correction failed\n");
> +               else
> +                       DRM_DEBUG_DRIVER("Gamma correction success\n");
> +       }

So we just error but carry on? Again, this (and all the following)
need to be in check, not commit. And you can just drop the 'success'
message.

> +void intel_crtc_attach_color_properties(struct drm_crtc *crtc)
> +{
> +}

... ?

> +/* Color management bit utilities */
> +#define GET_BIT_MASK(n) ((1 << n) - 1)
> +
> +/* Read bits of a word from bit no. 'start'(lsb) till 'n' bits */
> +#define GET_BITS(x, start, nbits) ((x >> start) & GET_BIT_MASK(nbits))
>
> +/* Round off by adding 1 to the immediate lower bit */
> +#define GET_BITS_ROUNDOFF(x, start, nbits) \
> +       ((GET_BITS(x, start, (nbits + 1)) + 1) >> 1)
> +
> +/* Clear bits of a word from bit no. 'start' till nbits */
> +#define CLEAR_BITS(x, start, nbits) ( \
> +               x &= ~((GET_BIT_MASK(nbits) << start)))
> +
> +/* Write bit_pattern of no_bits bits in a target word */
> +#define SET_BITS(target, bit_pattern, start_bit, no_bits) \
> +               do { \
> +                       CLEAR_BITS(target, start_bit, no_bits); \
> +                       target |= (bit_pattern << start_bit);  \
> +               } while (0)

Is there actually no common set of macros for these? I would also
expect to see this as lowest_bit -> highest_bit, rather than start_bit
-> (start_bit -> start_bit + nbits/no_bits).

> +/* CHV */
> +#define CHV_10BIT_GAMMA_MAX_VALS               257

Delete this, as it's already declared in the 'Gamma on CHV' block.

> +#define CHV_DEGAMMA_MAX_VALS                   65

Move this to that block.

> +/* No of coeff for disabling gamma is 0 */
> +#define GAMMA_DISABLE_VALS                     0

... and delete this.

> +/* Gamma on CHV */
> +#define CHV_10BIT_GAMMA_MAX_VALS               257
> +#define CHV_8BIT_GAMMA_MAX_VALS                256
> +#define CHV_10BIT_GAMMA_MSB_SHIFT              6
> +#define CHV_GAMMA_SHIFT_GREEN                  16
> +#define CHV_MAX_GAMMA                          ((1 << 24) - 1)

Shouldn't drm_palette or similar declare that the range of gamma
values is [0,(1 << 24) -1], or just do some normalisation, rather than
relying on people to know that CHV has a different gamma range than
the type would lead you to believe?

> +/*
> + * CSC values are 64-bit values. For CHV, the maximum CSC value that
> + * user can program is 7.99999..., which can be represented in fixed point
> + * S31.32 format like this, with all fractional bits as 1
> + */

As with (de)gamma, might help to note that CSC is CTM.

Also, this means that the comment I noted in the drm_ctm structure is
wrong: the range really is [0..8.0) rather than [0..1.0].
> +#define CSC_MAX_VALS                           9

Isn't MAX_VALS fixed at 9 everywhere ... ?

> +/* Degamma on CHV */
> +#define CHV_DEGAMMA_MSB_SHIFT                  2
> +#define CHV_DEGAMMA_GREEN_SHIFT                16

Slightly baffling, and also unused ... but what does it mean?

Cheers,
Daniel


More information about the Intel-gfx mailing list