The names of the struct members for RPS are stupid. Every time I need to
do anything in this code I have to spend a significant amount of time
remembering what it all means. By renaming the variables (and adding
comments) I hope to clear up the situation. Indeed, doing this makes some
upcoming patches more readable.

I've avoided ILK because it's possible that the naming used for Ironlake
matches what is in the docs. I believe the ILK power docs were never
published, and I am too lazy to dig them up.

While there may be mistakes, this patch was mostly done via sed. The
renaming of "hw_max" required a bit of interactivity.
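
For reference, the mechanical part of the rename was roughly along these
lines (an illustrative sketch, not the exact invocation used):

  # Illustrative only: the struct declarations in i915_drv.h and the
  # "hw_max" rename were done by hand, and the ILK paths were left alone.
  sed -i \
      -e 's/rps\.cur_delay/rps.cur_freq/g' \
      -e 's/rps\.min_delay/rps.min_freq_softlimit/g' \
      -e 's/rps\.max_delay/rps.max_freq_softlimit/g' \
      -e 's/rps\.rp0_delay/rps.max_freq_hardlimit/g' \
      -e 's/rps\.rp1_delay/rps.min_freq_hardlimit/g' \
      -e 's/rps\.rpe_delay/rps.nominal_freq/g' \
      drivers/gpu/drm/i915/i915_debugfs.c \
      drivers/gpu/drm/i915/i915_irq.c \
      drivers/gpu/drm/i915/i915_sysfs.c \
      drivers/gpu/drm/i915/intel_pm.c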

v2: Updated code comments to be less repetitive and more informative
(Ben)

Cc: Jeff McGee <jeff.mc...@intel.com>
Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  26 ++++----
 drivers/gpu/drm/i915/i915_drv.h     |  31 +++++++---
 drivers/gpu/drm/i915/i915_irq.c     |  24 ++++----
 drivers/gpu/drm/i915/i915_sysfs.c   |  44 +++++++-------
 drivers/gpu/drm/i915/intel_pm.c     | 116 ++++++++++++++++++------------------
 5 files changed, 129 insertions(+), 112 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d90a707..80087d1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1027,7 +1027,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                           max_freq * GT_FREQUENCY_MULTIPLIER);
 
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+                          dev_priv->rps.max_freq_overclock * GT_FREQUENCY_MULTIPLIER);
        } else if (IS_VALLEYVIEW(dev)) {
                u32 freq_sts, val;
 
@@ -1485,8 +1485,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-       for (gpu_freq = dev_priv->rps.min_delay;
-            gpu_freq <= dev_priv->rps.max_delay;
+       for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+            gpu_freq <= dev_priv->rps.max_freq_softlimit;
             gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
@@ -3371,9 +3371,9 @@ i915_max_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        else
-               *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -3410,16 +3410,16 @@ i915_max_freq_set(void *data, u64 val)
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq_overclock;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.max_delay = val;
+       dev_priv->rps.max_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
@@ -3452,9 +3452,9 @@ i915_min_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        else
-               *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -3491,16 +3491,16 @@ i915_min_freq_set(void *data, u64 val)
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq_overclock;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_delay = val;
+       dev_priv->rps.min_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8c64831..90aa1c5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -967,13 +967,30 @@ struct intel_gen6_power_mgmt {
        struct work_struct work;
        u32 pm_iir;
 
-       u8 cur_delay;
-       u8 min_delay;
-       u8 max_delay;
-       u8 rpe_delay;
-       u8 rp1_delay;
-       u8 rp0_delay;
-       u8 hw_max;
+       /* Frequencies are stored in potentially platform dependent multiples.
+        * In other words, *_freq needs to be multiplied by X to be interesting.
+        * Soft limits are bounds imposed by the driver rather than by the
+        * hardware; they constrain the dynamic reclocking done by the driver
+        * (raise frequencies under heavy loads, and lower them for lighter
+        * loads). Soft limits default to the min and max hard limits, but can
+        * be restricted through user intervention via sysfs/debugfs.
+        *
+        * Hard limits are the actual limits imposed by the hardware.
+        *
+        * A distinction is made for overclocking, which is never enabled by
+        * default, and is considered to be above the hard limit if it's
+        * possible at all.
+        *
+        * Nominal frequency is a predetermined value to give the best
+        * performance/power ratio.
+        */
+       u8 cur_freq; /* Current frequency (cached in SW, may not reflect HW) */
+       u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+       u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+       u8 min_freq_hardlimit; /* AKA RP1 Minimum frequency permitted by the GPU */
+       u8 max_freq_hardlimit; /* AKA RP0 Maximum frequency permitted by the GPU */
+       u8 nominal_freq; /* AKA RPe Nominal GPU frequency */
+       u8 max_freq_overclock; /* Max overclocking frequency permitted by the GPU */
 
        bool rp_up_masked;
        bool rp_down_masked;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f68aee3..49f2627 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1063,7 +1063,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
                             u32 pm_iir, int new_delay)
 {
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (new_delay >= dev_priv->rps.max_delay) {
+               if (new_delay >= dev_priv->rps.max_freq_softlimit) {
                        /* Mask UP THRESHOLD Interrupts */
                        I915_WRITE(GEN6_PMINTRMSK,
                                   I915_READ(GEN6_PMINTRMSK) |
@@ -1078,7 +1078,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
                        dev_priv->rps.rp_down_masked = false;
                }
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-               if (new_delay <= dev_priv->rps.min_delay) {
+               if (new_delay <= dev_priv->rps.min_freq_softlimit) {
                        /* Mask DOWN THRESHOLD Interrupts */
                        I915_WRITE(GEN6_PMINTRMSK,
                                   I915_READ(GEN6_PMINTRMSK) |
@@ -1124,38 +1124,38 @@ static void gen6_pm_rps_work(struct work_struct *work)
                        adj *= 2;
                else
                        adj = 1;
-               new_delay = dev_priv->rps.cur_delay + adj;
+               new_delay = dev_priv->rps.cur_freq + adj;
 
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
-               if (new_delay < dev_priv->rps.rpe_delay)
-                       new_delay = dev_priv->rps.rpe_delay;
+               if (new_delay < dev_priv->rps.nominal_freq)
+                       new_delay = dev_priv->rps.nominal_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-               if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-                       new_delay = dev_priv->rps.rpe_delay;
+               if (dev_priv->rps.cur_freq > dev_priv->rps.nominal_freq)
+                       new_delay = dev_priv->rps.nominal_freq;
                else
-                       new_delay = dev_priv->rps.min_delay;
+                       new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
-               new_delay = dev_priv->rps.cur_delay + adj;
+               new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
-               new_delay = dev_priv->rps.cur_delay;
+               new_delay = dev_priv->rps.cur_freq;
        }
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay = clamp_t(int, new_delay,
-                           dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+                           dev_priv->rps.min_freq_softlimit, dev_priv->rps.max_freq_softlimit);
 
        gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
-       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e9ffefb..86999aa 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
-               ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
-                       vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+                       vlv_gpu_freq(dev_priv, dev_priv->rps.nominal_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        else
-               ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -334,13 +334,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                val /= GT_FREQUENCY_MULTIPLIER;
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq_overclock;
                non_oc_max = (rp_state_cap & 0xff);
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }
 
        if (val < hw_min || val > hw_max ||
-           val < dev_priv->rps.min_delay) {
+           val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
@@ -349,18 +349,18 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                DRM_DEBUG("User requested overclocking to %d\n",
                          val * GT_FREQUENCY_MULTIPLIER);
 
-       dev_priv->rps.max_delay = val;
+       dev_priv->rps.max_freq_softlimit = val;
 
-       if (dev_priv->rps.cur_delay > val) {
+       if (dev_priv->rps.cur_freq > val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                        gen6_set_rps(dev, val);
        } else if (!IS_VALLEYVIEW(dev)) {
-               /* We still need gen6_set_rps to process the new max_delay and
-                * update the interrupt limits even though frequency request is
-                * unchanged. */
-               gen6_set_rps(dev, dev_priv->rps.cur_delay);
+               /* We still need gen6_set_rps to process the new
+                * max_freq_softlimit and update the interrupt limits even
+                * though frequency request is unchanged. */
+               gen6_set_rps(dev, dev_priv->rps.cur_freq);
        }
 
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -379,9 +379,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        else
-               ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -414,27 +414,27 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                val /= GT_FREQUENCY_MULTIPLIER;
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq_overclock;
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_delay = val;
+       dev_priv->rps.min_freq_softlimit = val;
 
-       if (dev_priv->rps.cur_delay < val) {
+       if (dev_priv->rps.cur_freq < val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                        gen6_set_rps(dev, val);
        } else if (!IS_VALLEYVIEW(dev)) {
-               /* We still need gen6_set_rps to process the new min_delay and
-                * update the interrupt limits even though frequency request is
-                * unchanged. */
-               gen6_set_rps(dev, dev_priv->rps.cur_delay);
+               /* We still need gen6_set_rps to process the new
+                * min_freq_softlimit and update the interrupt limits even
+                * though frequency request is unchanged. */
+               gen6_set_rps(dev, dev_priv->rps.cur_freq);
        }
 
        mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 66172b6..341c154 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2904,9 +2904,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
-       limits = dev_priv->rps.max_delay << 24;
-       if (val <= dev_priv->rps.min_delay)
-               limits |= dev_priv->rps.min_delay << 16;
+       limits = dev_priv->rps.max_freq_softlimit << 24;
+       if (val <= dev_priv->rps.min_freq_softlimit)
+               limits |= dev_priv->rps.min_freq_softlimit << 16;
 
        return limits;
 }
@@ -2918,26 +2918,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        new_power = dev_priv->rps.power;
        switch (dev_priv->rps.power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+               if (val > dev_priv->rps.nominal_freq + 1 && val > dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+               if (val <= dev_priv->rps.nominal_freq && val < dev_priv->rps.cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+               else if (val >= dev_priv->rps.max_freq_hardlimit && val > dev_priv->rps.cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+               if (val < (dev_priv->rps.min_freq_hardlimit + dev_priv->rps.max_freq_hardlimit) >> 1 && val < dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
        }
        /* Max/min bins are special */
-       if (val == dev_priv->rps.min_delay)
+       if (val == dev_priv->rps.min_freq_softlimit)
                new_power = LOW_POWER;
-       if (val == dev_priv->rps.max_delay)
+       if (val == dev_priv->rps.max_freq_softlimit)
                new_power = HIGH_POWER;
        if (new_power == dev_priv->rps.power)
                return;
@@ -3004,17 +3004,17 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 }
 
 /* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
+ * called when the range (min_freq_softlimit and max_freq_softlimit) is modified so that we can
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_delay);
-       WARN_ON(val < dev_priv->rps.min_delay);
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-       if (val == dev_priv->rps.cur_delay) {
+       if (val == dev_priv->rps.cur_freq) {
                /* min/max delay may still have been modified so be sure to
                 * write the limits value */
                I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -3042,7 +3042,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
        POSTING_READ(GEN6_RPNSWREQ);
 
-       dev_priv->rps.cur_delay = val;
+       dev_priv->rps.cur_freq = val;
 
        trace_intel_gpu_freq_change(val * 50);
 }
@@ -3062,7 +3062,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
         * When we are idle.  Drop to min voltage state.
         */
 
-       if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+       if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
                return;
 
        /* Mask turbo interrupt so that they will not come in between */
@@ -3079,10 +3079,10 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
                return;
        }
 
-       dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+       dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
        vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-                                       dev_priv->rps.min_delay);
+                                       dev_priv->rps.min_freq_softlimit);
 
        if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
                                & GENFREQSTATUS) == 0, 5))
@@ -3096,7 +3096,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
        /* Unmask Up interrupts */
        dev_priv->rps.rp_up_masked = true;
        gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
-                                               dev_priv->rps.min_delay);
+                                               dev_priv->rps.min_freq_softlimit);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3108,7 +3108,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
                if (IS_VALLEYVIEW(dev))
                        vlv_set_rps_idle(dev_priv);
                else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
                dev_priv->rps.last_adj = 0;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3121,9 +3121,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
                else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
                dev_priv->rps.last_adj = 0;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3134,20 +3134,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_delay);
-       WARN_ON(val < dev_priv->rps.min_delay);
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
        DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-                        dev_priv->rps.cur_delay,
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq,
                         vlv_gpu_freq(dev_priv, val), val);
 
-       if (val == dev_priv->rps.cur_delay)
+       if (val == dev_priv->rps.cur_freq)
                return;
 
        vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-       dev_priv->rps.cur_delay = val;
+       dev_priv->rps.cur_freq = val;
 
        trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
@@ -3289,8 +3289,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 
        /* Docs recommend 900MHz, and 300 MHz respectively */
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_delay << 24 |
-                  dev_priv->rps.min_delay << 16);
+                  dev_priv->rps.max_freq_softlimit << 24 |
+                  dev_priv->rps.min_freq_softlimit << 16);
 
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3350,19 +3350,19 @@ static void gen6_enable_rps(struct drm_device *dev)
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
        /* In units of 50MHz */
-       dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+       dev_priv->rps.max_freq_overclock = hw_max = rp_state_cap & 0xff;
        hw_min = (rp_state_cap >> 16) & 0xff;
-       dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
-       dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
-       dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
-       dev_priv->rps.cur_delay = 0;
+       dev_priv->rps.min_freq_hardlimit = (rp_state_cap >>  8) & 0xff;
+       dev_priv->rps.max_freq_hardlimit = (rp_state_cap >>  0) & 0xff;
+       dev_priv->rps.nominal_freq = dev_priv->rps.min_freq_hardlimit;
+       dev_priv->rps.cur_freq = 0;
 
        /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_delay == 0)
-               dev_priv->rps.max_delay = hw_max;
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = hw_max;
 
-       if (dev_priv->rps.min_delay == 0)
-               dev_priv->rps.min_delay = hw_min;
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = hw_min;
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3417,13 +3417,13 @@ static void gen6_enable_rps(struct drm_device *dev)
        ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
        if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
                DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-                                (dev_priv->rps.max_delay & 0xff) * 50,
+                                (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
                                 (pcu_mbox & 0xff) * 50);
-               dev_priv->rps.hw_max = pcu_mbox & 0xff;
+               dev_priv->rps.max_freq_overclock = pcu_mbox & 0xff;
        }
 
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
        gen6_enable_rps_interrupts(dev);
 
@@ -3479,9 +3479,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
         * to use for memory access.  We do this by specifying the IA frequency
         * the PCU should use as a reference to determine the ring frequency.
         */
-       for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+       for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
             gpu_freq--) {
-               int diff = dev_priv->rps.max_delay - gpu_freq;
+               int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
                unsigned int ia_freq = 0, ring_freq = 0;
 
                if (INTEL_INFO(dev)->gen >= 8) {
@@ -3647,20 +3647,20 @@ static void valleyview_enable_rps(struct drm_device *dev)
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
        DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-                        dev_priv->rps.cur_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
 
-       dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+       dev_priv->rps.max_freq_overclock = hw_max = valleyview_rps_max_freq(dev_priv);
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, hw_max),
                         hw_max);
 
-       dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+       dev_priv->rps.nominal_freq = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-                        dev_priv->rps.rpe_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.nominal_freq),
+                        dev_priv->rps.nominal_freq);
 
        hw_min = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
@@ -3668,17 +3668,17 @@ static void valleyview_enable_rps(struct drm_device *dev)
                         hw_min);
 
        /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_delay == 0)
-               dev_priv->rps.max_delay = hw_max;
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = hw_max;
 
-       if (dev_priv->rps.min_delay == 0)
-               dev_priv->rps.min_delay = hw_min;
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = hw_min;
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-                        dev_priv->rps.rpe_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.nominal_freq),
+                        dev_priv->rps.nominal_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.nominal_freq);
 
        dev_priv->rps.rp_up_masked = false;
        dev_priv->rps.rp_down_masked = false;
@@ -4119,7 +4119,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
        assert_spin_locked(&mchdev_lock);
 
-       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);
 
-- 
1.9.0
