On Wed, Apr 09, 2014 at 01:29:02PM +0300, ville.syrj...@linux.intel.com wrote:
> From: Ville Syrjälä <ville.syrj...@linux.intel.com>
> 
> All PCS group access reads return 0xffffffff, so we can't use group
> access for RMW cycles. Instead, target each spline separately.

I have no idea what PCS and spline mean here ... Can you please expand
those terms for those of us who haven't yet lost our souls in the chv docs?
Just so we have commonly-understood jargon for talking about this stuff.

Thanks, Daniel
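
For anyone who hasn't dug through the patch yet: the change boils down to
replacing each group-access read-modify-write (read VLV_PCS_DW1, set a bit,
write it back) with two RMW cycles, one per spline, since the group-access
read only ever returns 0xffffffff. A minimal sketch of the resulting pattern,
using the helpers and defines from the patch below (the chv_pcs_assert_softreset
wrapper is made up here purely for illustration, it is not part of the patch):

/*
 * Group reads (VLV_PCS_DW1) return 0xffffffff on chv, so the RMW has to
 * go through the per-spline registers instead of the group alias.
 */
static void chv_pcs_assert_softreset(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum dpio_channel ch)
{
	u32 val;

	/* first spline (PCS01) */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	/* second spline (PCS23) */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}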

> 
> Signed-off-by: Ville Syrjälä <ville.syrj...@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_reg.h   | 14 ++++++++++++++
>  drivers/gpu/drm/i915/intel_dp.c   | 32 ++++++++++++++++++++++++--------
>  drivers/gpu/drm/i915/intel_hdmi.c | 34 +++++++++++++++++++++++++---------
>  3 files changed, 63 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 4617fb3..ffed03e 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -654,6 +654,13 @@ enum punit_power_well {
>  #define   DPIO_PCS_TX_LANE1_RESET    (1<<7)
>  #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
>  
> +#define _VLV_PCS01_DW0_CH0           0x200
> +#define _VLV_PCS23_DW0_CH0           0x400
> +#define _VLV_PCS01_DW0_CH1           0x2600
> +#define _VLV_PCS23_DW0_CH1           0x2800
> +#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
> +#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
> +
>  #define _VLV_PCS_DW1_CH0             0x8204
>  #define _VLV_PCS_DW1_CH1             0x8404
>  #define   CHV_PCS_REQ_SOFTRESET_EN   (1<<23)
> @@ -663,6 +670,13 @@ enum punit_power_well {
>  #define   DPIO_PCS_CLK_SOFT_RESET    (1<<5)
>  #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
>  
> +#define _VLV_PCS01_DW1_CH0           0x204
> +#define _VLV_PCS23_DW1_CH0           0x404
> +#define _VLV_PCS01_DW1_CH1           0x2604
> +#define _VLV_PCS23_DW1_CH1           0x2804
> +#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
> +#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
> +
>  #define _VLV_PCS_DW8_CH0             0x8220
>  #define _VLV_PCS_DW8_CH1             0x8420
>  #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 079e0e3..cc7bccd3 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -1845,13 +1845,21 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
>       mutex_lock(&dev_priv->dpio_lock);
>  
>       /* Propagate soft reset to data lane reset */
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
>       val |= CHV_PCS_REQ_SOFTRESET_EN;
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
>  
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
> +     val |= CHV_PCS_REQ_SOFTRESET_EN;
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
> +     val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
>       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
>  
>       mutex_unlock(&dev_priv->dpio_lock);
>  }
> @@ -1983,13 +1991,21 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
>       mutex_lock(&dev_priv->dpio_lock);
>  
>       /* Deassert soft data lane reset*/
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
>       val |= CHV_PCS_REQ_SOFTRESET_EN;
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
> +     val |= CHV_PCS_REQ_SOFTRESET_EN;
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
> +     val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
>  
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
>       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
>  
>       /* Program Tx lane latency optimal setting*/
>       for (i = 0; i < 4; i++) {
> diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
> index 6a2152b..c3896b0 100644
> --- a/drivers/gpu/drm/i915/intel_hdmi.c
> +++ b/drivers/gpu/drm/i915/intel_hdmi.c
> @@ -1216,13 +1216,21 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
>       mutex_lock(&dev_priv->dpio_lock);
>  
>       /* Propagate soft reset to data lane reset */
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
>       val |= CHV_PCS_REQ_SOFTRESET_EN;
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val)
> -;
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch));
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
> +     val |= CHV_PCS_REQ_SOFTRESET_EN;
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
> +     val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
>       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
>  
>       mutex_unlock(&dev_priv->dpio_lock);
>  }
> @@ -1242,13 +1250,21 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
>       mutex_lock(&dev_priv->dpio_lock);
>  
>       /* Deassert soft data lane reset*/
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
>       val |= CHV_PCS_REQ_SOFTRESET_EN;
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
> +     val |= CHV_PCS_REQ_SOFTRESET_EN;
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
> +
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
> +     val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
>  
> -     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch));
> +     val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
>       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
> -     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val);
> +     vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
>  
>       /* Program Tx latency optimal setting */
>       for (i = 0; i < 4; i++) {
> -- 
> 1.8.3.2
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
