[PATCH 1/1] drm/i915/slpc: Support for GuC Based SLPC
Sagar Arun Kamble
sagar.a.kamble at intel.com
Sun Jul 3 09:12:01 UTC 2016
This is a squash of 27 patches, done to avoid spamming the mailing list
with multiple mails. It is being used for CI testing.
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
---
drivers/gpu/drm/i915/Makefile | 5 +-
drivers/gpu/drm/i915/i915_debugfs.c | 460 ++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_drv.c | 17 +-
drivers/gpu/drm/i915/i915_drv.h | 7 +
drivers/gpu/drm/i915/i915_guc_submission.c | 45 +--
drivers/gpu/drm/i915/i915_params.c | 6 +
drivers/gpu/drm/i915/i915_params.h | 1 +
drivers/gpu/drm/i915/i915_pci.c | 2 +
drivers/gpu/drm/i915/i915_reg.h | 1 +
drivers/gpu/drm/i915/i915_sysfs.c | 21 ++
drivers/gpu/drm/i915/intel_display.c | 4 +-
drivers/gpu/drm/i915/intel_dp.c | 2 +
drivers/gpu/drm/i915/intel_drv.h | 14 +
drivers/gpu/drm/i915/intel_guc.h | 12 +
drivers/gpu/drm/i915/intel_guc_loader.c | 42 ++-
drivers/gpu/drm/i915/intel_pm.c | 114 +++++--
drivers/gpu/drm/i915/intel_slpc.c | 501 +++++++++++++++++++++++++++++
drivers/gpu/drm/i915/intel_slpc.h | 219 +++++++++++++
18 files changed, 1413 insertions(+), 60 deletions(-)
create mode 100644 drivers/gpu/drm/i915/intel_slpc.c
create mode 100644 drivers/gpu/drm/i915/intel_slpc.h
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 618293c..5f966f0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -45,8 +45,9 @@ i915-y += i915_cmd_parser.o \
intel_uncore.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_guc_loader.o \
- i915_guc_submission.o
+i915-y += i915_guc_submission.o \
+ intel_guc_loader.o \
+ intel_slpc.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f5899b6..28c2319 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1196,6 +1196,438 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
"0x%llx\n");
+static int slpc_enable_disable_get(struct drm_device *dev, u64 *val,
+ enum slpc_param_id enable_id,
+ enum slpc_param_id disable_id)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int override_enable, override_disable;
+ u32 value_enable, value_disable;
+ int ret = 0;
+
+ if (!intel_slpc_active(dev_priv)) {
+ ret = -ENODEV;
+ } else if (val) {
+ intel_slpc_get_param(dev_priv, enable_id, &override_enable,
+ &value_enable);
+ intel_slpc_get_param(dev_priv, disable_id, &override_disable,
+ &value_disable);
+
+ /* set the output value:
+ * 0: default
+ * 1: enabled
+ * 2: disabled
+ * 3: unknown (should not happen)
+ */
+ if (override_disable && (1 == value_disable))
+ *val = SLPC_PARAM_TASK_DISABLED;
+ else if (override_enable && (1 == value_enable))
+ *val = SLPC_PARAM_TASK_ENABLED;
+ else if (!override_enable && !override_disable)
+ *val = SLPC_PARAM_TASK_DEFAULT;
+ else
+ *val = SLPC_PARAM_TASK_UNKNOWN;
+
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int slpc_enable_disable_set(struct drm_device *dev, u64 val,
+ enum slpc_param_id enable_id,
+ enum slpc_param_id disable_id)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ if (!intel_slpc_active(dev_priv)) {
+ ret = -ENODEV;
+ } else if (SLPC_PARAM_TASK_DEFAULT == val) {
+ /* set default */
+ intel_slpc_unset_param(dev_priv, enable_id);
+ intel_slpc_unset_param(dev_priv, disable_id);
+ } else if (SLPC_PARAM_TASK_ENABLED == val) {
+ /* set enable */
+ intel_slpc_set_param(dev_priv, enable_id, 1);
+ intel_slpc_unset_param(dev_priv, disable_id);
+ } else if (SLPC_PARAM_TASK_DISABLED == val) {
+ /* set disable */
+ intel_slpc_set_param(dev_priv, disable_id, 1);
+ intel_slpc_unset_param(dev_priv, enable_id);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void slpc_param_show(struct seq_file *m, enum slpc_param_id enable_id,
+ enum slpc_param_id disable_id)
+{
+ struct drm_device *dev = m->private;
+ const char *status;
+ u64 val;
+ int ret;
+
+ ret = slpc_enable_disable_get(dev, &val, enable_id, disable_id);
+
+ if (ret) {
+ seq_printf(m, "error %d\n", ret);
+ } else {
+ switch (val) {
+ case SLPC_PARAM_TASK_DEFAULT:
+ status = "default\n";
+ break;
+
+ case SLPC_PARAM_TASK_ENABLED:
+ status = "enabled\n";
+ break;
+
+ case SLPC_PARAM_TASK_DISABLED:
+ status = "disabled\n";
+ break;
+
+ default:
+ status = "unknown\n";
+ break;
+ }
+
+ seq_puts(m, status);
+ }
+}
+
+static int slpc_param_write(struct seq_file *m, const char __user *ubuf,
+ size_t len, enum slpc_param_id enable_id,
+ enum slpc_param_id disable_id)
+{
+ struct drm_device *dev = m->private;
+ u64 val;
+ int ret = 0;
+ char buf[10];
+
+ if (len >= sizeof(buf))
+ ret = -EINVAL;
+ else if (copy_from_user(buf, ubuf, len))
+ ret = -EFAULT;
+ else
+ buf[len] = '\0';
+
+ if (!ret) {
+ if (!strncmp(buf, "default", 7))
+ val = SLPC_PARAM_TASK_DEFAULT;
+ else if (!strncmp(buf, "enabled", 7))
+ val = SLPC_PARAM_TASK_ENABLED;
+ else if (!strncmp(buf, "disabled", 8))
+ val = SLPC_PARAM_TASK_DISABLED;
+ else
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ ret = slpc_enable_disable_set(dev, val, enable_id, disable_id);
+
+ return ret;
+}
+
+static int slpc_gtperf_show(struct seq_file *m, void *data)
+{
+ slpc_param_show(m, SLPC_PARAM_TASK_ENABLE_GTPERF,
+ SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+ return 0;
+}
+
+static int slpc_gtperf_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *dev = inode->i_private;
+
+ return single_open(file, slpc_gtperf_show, dev);
+}
+
+static ssize_t slpc_gtperf_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int ret = 0;
+
+ ret = slpc_param_write(m, ubuf, len, SLPC_PARAM_TASK_ENABLE_GTPERF,
+ SLPC_PARAM_TASK_DISABLE_GTPERF);
+ if (ret)
+ return (size_t) ret;
+
+ return len;
+}
+
+static const struct file_operations i915_slpc_gtperf_fops = {
+ .owner = THIS_MODULE,
+ .open = slpc_gtperf_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = slpc_gtperf_write,
+ .llseek = seq_lseek
+};
+
+static int slpc_balancer_show(struct seq_file *m, void *data)
+{
+ slpc_param_show(m, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+ return 0;
+}
+
+static int slpc_balancer_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *dev = inode->i_private;
+
+ return single_open(file, slpc_balancer_show, dev);
+}
+
+static ssize_t slpc_balancer_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int ret = 0;
+
+ ret = slpc_param_write(m, ubuf, len, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
+ if (ret)
+ return (size_t) ret;
+
+ return len;
+}
+
+static const struct file_operations i915_slpc_balancer_fops = {
+ .owner = THIS_MODULE,
+ .open = slpc_balancer_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = slpc_balancer_write,
+ .llseek = seq_lseek
+};
+
+static int slpc_dcc_show(struct seq_file *m, void *data)
+{
+ slpc_param_show(m, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+
+ return 0;
+}
+
+static int slpc_dcc_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *dev = inode->i_private;
+
+ return single_open(file, slpc_dcc_show, dev);
+}
+
+static ssize_t slpc_dcc_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int ret = 0;
+
+ ret = slpc_param_write(m, ubuf, len, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+ if (ret)
+ return (size_t) ret;
+
+ return len;
+}
+
+static const struct file_operations i915_slpc_dcc_fops = {
+ .owner = THIS_MODULE,
+ .open = slpc_dcc_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = slpc_dcc_write,
+ .llseek = seq_lseek
+};
+
+static int i915_slpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct page *page;
+ void *pv = NULL;
+ struct slpc_shared_data data;
+ int i, value;
+ enum slpc_global_state global_state;
+ enum slpc_platform_sku platform_sku;
+ enum slpc_host_os host_os;
+ enum slpc_power_plan power_plan;
+ enum slpc_power_source power_source;
+
+ obj = dev_priv->guc.slpc.shared_data_obj;
+ if (obj) {
+ intel_slpc_query_task_state(dev_priv);
+
+ page = i915_gem_object_get_page(obj, 0);
+ if (page)
+ pv = kmap_atomic(page);
+ }
+
+ if (pv) {
+ data = *(struct slpc_shared_data *) pv;
+ kunmap_atomic(pv);
+
+ seq_printf(m, "SLPC Version: %d.%d.%d (0x%8x)\n",
+ data.slpc_version >> 16,
+ (data.slpc_version >> 8) & 0xFF,
+ data.slpc_version & 0xFF,
+ data.slpc_version);
+ seq_printf(m, "shared data size: %d\n", data.shared_data_size);
+
+ global_state = (enum slpc_global_state) data.global_state;
+ seq_printf(m, "global state: %d (", global_state);
+ switch (global_state) {
+ case SLPC_GLOBAL_STATE_NOT_RUNNING:
+ seq_puts(m, "not running)\n");
+ break;
+ case SLPC_GLOBAL_STATE_INITIALIZING:
+ seq_puts(m, "initializing)\n");
+ break;
+ case SLPC_GLOBAL_STATE_RESETTING:
+ seq_puts(m, "resetting)\n");
+ break;
+ case SLPC_GLOBAL_STATE_RUNNING:
+ seq_puts(m, "running)\n");
+ break;
+ case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+ seq_puts(m, "shutting down)\n");
+ break;
+ case SLPC_GLOBAL_STATE_ERROR:
+ seq_puts(m, "error)\n");
+ break;
+ default:
+ seq_puts(m, "unknown)\n");
+ break;
+ }
+
+ platform_sku = (enum slpc_platform_sku)
+ data.platform_info.platform_sku;
+ seq_printf(m, "sku: %d (", platform_sku);
+ switch (platform_sku) {
+ case SLPC_PLATFORM_SKU_UNDEFINED:
+ seq_puts(m, "undefined)\n");
+ break;
+ case SLPC_PLATFORM_SKU_ULX:
+ seq_puts(m, "ULX)\n");
+ break;
+ case SLPC_PLATFORM_SKU_ULT:
+ seq_puts(m, "ULT)\n");
+ break;
+ case SLPC_PLATFORM_SKU_T:
+ seq_puts(m, "T)\n");
+ break;
+ case SLPC_PLATFORM_SKU_MOBL:
+ seq_puts(m, "Mobile)\n");
+ break;
+ case SLPC_PLATFORM_SKU_DT:
+ seq_puts(m, "DT)\n");
+ break;
+ case SLPC_PLATFORM_SKU_UNKNOWN:
+ default:
+ seq_puts(m, "unknown)\n");
+ break;
+ }
+ seq_printf(m, "slice count: %d\n",
+ data.platform_info.slice_count);
+
+ host_os = (enum slpc_host_os) data.platform_info.host_os;
+ seq_printf(m, "host OS: %d (", host_os);
+ switch (host_os) {
+ case SLPC_HOST_OS_UNDEFINED:
+ seq_puts(m, "undefined)\n");
+ break;
+ case SLPC_HOST_OS_WINDOWS_8:
+ seq_puts(m, "Windows 8)\n");
+ break;
+ default:
+ seq_puts(m, "unknown)\n");
+ break;
+ }
+
+ seq_printf(m, "power plan/source: 0x%x\n\tplan:\t",
+ data.platform_info.power_plan_source);
+ power_plan = (enum slpc_power_plan) SLPC_POWER_PLAN(
+ data.platform_info.power_plan_source);
+ power_source = (enum slpc_power_source) SLPC_POWER_SOURCE(
+ data.platform_info.power_plan_source);
+ switch (power_plan) {
+ case SLPC_POWER_PLAN_UNDEFINED:
+ seq_puts(m, "undefined");
+ break;
+ case SLPC_POWER_PLAN_BATTERY_SAVER:
+ seq_puts(m, "battery saver");
+ break;
+ case SLPC_POWER_PLAN_BALANCED:
+ seq_puts(m, "balanced");
+ break;
+ case SLPC_POWER_PLAN_PERFORMANCE:
+ seq_puts(m, "performance");
+ break;
+ case SLPC_POWER_PLAN_UNKNOWN:
+ default:
+ seq_puts(m, "unknown");
+ break;
+ }
+ seq_puts(m, "\n\tsource:\t");
+ switch (power_source) {
+ case SLPC_POWER_SOURCE_UNDEFINED:
+ seq_puts(m, "undefined\n");
+ break;
+ case SLPC_POWER_SOURCE_AC:
+ seq_puts(m, "AC\n");
+ break;
+ case SLPC_POWER_SOURCE_DC:
+ seq_puts(m, "DC\n");
+ break;
+ case SLPC_POWER_SOURCE_UNKNOWN:
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ seq_printf(m, "IA frequency (MHz):\n\tP0: %d\n\tP1: %d\n\tPe: %d\n\tPn: %d\n",
+ data.platform_info.P0_freq * 50,
+ data.platform_info.P1_freq * 50,
+ data.platform_info.Pe_freq * 50,
+ data.platform_info.Pn_freq * 50);
+ seq_printf(m, "RAPL package power limits:\n\t0x%08x\n\t0x%08x\n",
+ data.platform_info.package_rapl_limit_high,
+ data.platform_info.package_rapl_limit_low);
+ seq_printf(m, "task state data: 0x%08x\n",
+ data.task_state_data);
+ seq_printf(m, "\tturbo active: %d\n",
+ (data.task_state_data & 1));
+ seq_printf(m, "\tdfps stall possible: %d\n\tgame mode: %d\n\tdfps target fps: %d\n",
+ (data.task_state_data & 2),
+ (data.task_state_data & 4),
+ (data.task_state_data >> 3) & 0xFF);
+
+ seq_puts(m, "override parameter bitfield\n");
+ for (i = 0; i < SLPC_OVERRIDE_BITFIELD_SIZE; i++)
+ seq_printf(m, "%d: 0x%08x\n", i,
+ data.override_parameters_set_bits[i]);
+
+ seq_puts(m, "override parameters (only non-zero shown)\n");
+ for (i = 0; i < SLPC_MAX_OVERRIDE_PARAMETERS; i++) {
+ value = data.override_parameters_values[i];
+ if (value)
+ seq_printf(m, "%d: 0x%8x\n", i, value);
+ }
+
+ } else {
+ seq_puts(m, "no SLPC info available\n");
+ }
+
+ return 0;
+}
+
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1207,6 +1639,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ if (intel_slpc_active(dev_priv))
+ dev_priv->rps.cur_freq = gen9_read_requested_freq(dev_priv);
+
if (IS_GEN5(dev)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -2441,6 +2876,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_file *file;
+ if (intel_slpc_active(dev_priv))
+ dev_priv->rps.cur_freq = gen9_read_requested_freq(dev_priv);
+
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
@@ -5041,6 +5479,15 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ }
+
intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5108,6 +5555,15 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ }
+
intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5447,6 +5903,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
+ {"i915_slpc_info", i915_slpc_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
@@ -5489,6 +5946,9 @@ static const struct i915_debugfs_files {
const struct file_operations *fops;
} i915_debugfs_files[] = {
{"i915_wedged", &i915_wedged_fops},
+ {"i915_slpc_gtperf", &i915_slpc_gtperf_fops},
+ {"i915_slpc_balancer", &i915_slpc_balancer_fops},
+ {"i915_slpc_dcc", &i915_slpc_dcc_fops},
{"i915_max_freq", &i915_max_freq_fops},
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c9abf91..289fce5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2029,6 +2029,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int ret;
/*
@@ -2085,6 +2086,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
+ /*
+ * Mark GuC FW load status as PENDING to avoid any Host to GuC actions
+ * being invoked until GuC gets loaded in i915_drm_resume.
+ */
+ guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
intel_uncore_early_sanitize(dev_priv, true);
if (IS_BROXTON(dev_priv)) {
@@ -2700,9 +2707,17 @@ static int intel_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+ if (WARN_ON_ONCE(!intel_enable_rc6()))
return -ENODEV;
+ /*
+ * Once RC6 and RPS enabling are separated for non-GEN9 platforms,
+ * the check below should be removed.
+ */
+ if (!IS_GEN9(dev))
+ if (WARN_ON_ONCE(!dev_priv->rps.enabled))
+ return -ENODEV;
+
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0dcc43d..8e394cb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -762,6 +762,7 @@ struct intel_csr {
func(is_kabylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
+ func(has_slpc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
func(cursor_needs_physical) sep \
@@ -2864,6 +2865,7 @@ struct drm_i915_cmd_table {
#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_SLPC(dev) (INTEL_INFO(dev)->has_slpc)
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
@@ -4031,4 +4033,9 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
return false;
}
+static inline u8 gen9_read_requested_freq(struct drm_i915_private *dev_priv)
+{
+ return (u8) GEN9_GET_FREQUENCY(I915_READ(GEN6_RPNSWREQ));
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 355b647..930f2d8 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -47,7 +47,7 @@
* Firmware writes a success/fail code back to the action register after
* processes the request. The kernel driver polls waiting for this update and
* then proceeds.
- * See host2guc_action()
+ * See i915_guc_action()
*
* Doorbells:
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
@@ -75,9 +75,11 @@ static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
return GUC2HOST_IS_RESPONSE(val);
}
-static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
+int i915_guc_action(struct intel_guc *guc, u32 *data, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_fw *guc_fw = &guc->guc_fw;
+
u32 status;
int i;
int ret;
@@ -85,6 +87,9 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
+ if (WARN_ON(guc_fw->guc_fw_load_status != GUC_FIRMWARE_SUCCESS))
+ return -ENODEV;
+
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@@ -135,7 +140,7 @@ static int host2guc_allocate_doorbell(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
data[1] = client->ctx_index;
- return host2guc_action(guc, data, 2);
+ return i915_guc_action(guc, data, 2);
}
static int host2guc_release_doorbell(struct intel_guc *guc,
@@ -146,7 +151,7 @@ static int host2guc_release_doorbell(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
data[1] = client->ctx_index;
- return host2guc_action(guc, data, 2);
+ return i915_guc_action(guc, data, 2);
}
static int host2guc_sample_forcewake(struct intel_guc *guc,
@@ -163,7 +168,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
/* bit 0 and 1 are for Render and Media domain separately */
data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return i915_guc_action(guc, data, ARRAY_SIZE(data));
}
/*
@@ -607,7 +612,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
*/
/**
- * gem_allocate_guc_obj() - Allocate gem object for GuC usage
+ * i915_guc_allocate_gem_obj() - Allocate gem object for GuC usage
* @dev_priv: driver private data structure
* @size: size of object
*
@@ -617,8 +622,8 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
*
* Return: A drm_i915_gem_object if successful, otherwise NULL.
*/
-static struct drm_i915_gem_object *
-gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
+struct drm_i915_gem_object *
+i915_guc_allocate_gem_obj(struct drm_i915_private *dev_priv, u32 size)
{
struct drm_i915_gem_object *obj;
@@ -644,10 +649,10 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
}
/**
- * gem_release_guc_obj() - Release gem object allocated for GuC usage
+ * i915_guc_release_gem_obj() - Release gem object allocated for GuC usage
* @obj: gem obj to be released
*/
-static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
+void i915_guc_release_gem_obj(struct drm_i915_gem_object *obj)
{
if (!obj)
return;
@@ -682,7 +687,7 @@ guc_client_free(struct drm_i915_private *dev_priv,
kunmap(kmap_to_page(client->client_base));
}
- gem_release_guc_obj(client->client_obj);
+ i915_guc_release_gem_obj(client->client_obj);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
guc_fini_ctx_desc(guc, client);
@@ -773,7 +778,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
- obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
+ obj = i915_guc_allocate_gem_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
if (!obj)
goto err;
@@ -840,7 +845,7 @@ static void guc_create_log(struct intel_guc *guc)
obj = guc->log_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv, size);
+ obj = i915_guc_allocate_gem_obj(dev_priv, size);
if (!obj) {
/* logging will be off */
i915.guc_log_level = -1;
@@ -900,7 +905,7 @@ static void guc_create_ads(struct intel_guc *guc)
obj = guc->ads_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
+ obj = i915_guc_allocate_gem_obj(dev_priv, PAGE_ALIGN(size));
if (!obj)
return;
@@ -971,7 +976,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
if (guc->ctx_pool_obj)
return 0; /* already allocated */
- guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
+ guc->ctx_pool_obj = i915_guc_allocate_gem_obj(dev_priv, gemsize);
if (!guc->ctx_pool_obj)
return -ENOMEM;
@@ -1015,15 +1020,15 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- gem_release_guc_obj(dev_priv->guc.ads_obj);
+ i915_guc_release_gem_obj(dev_priv->guc.ads_obj);
guc->ads_obj = NULL;
- gem_release_guc_obj(dev_priv->guc.log_obj);
+ i915_guc_release_gem_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
if (guc->ctx_pool_obj)
ida_destroy(&guc->ctx_ids);
- gem_release_guc_obj(guc->ctx_pool_obj);
+ i915_guc_release_gem_obj(guc->ctx_pool_obj);
guc->ctx_pool_obj = NULL;
}
@@ -1049,7 +1054,7 @@ int intel_guc_suspend(struct drm_device *dev)
/* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return i915_guc_action(guc, data, ARRAY_SIZE(data));
}
@@ -1074,5 +1079,5 @@ int intel_guc_resume(struct drm_device *dev)
/* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return i915_guc_action(guc, data, ARRAY_SIZE(data));
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8b13bfa..59f662e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -36,6 +36,7 @@ struct i915_params i915 __read_mostly = {
.enable_dc = -1,
.enable_fbc = -1,
.enable_execlists = -1,
+ .enable_slpc = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
@@ -130,6 +131,11 @@ MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)");
+module_param_named_unsafe(enable_slpc, i915.enable_slpc, int, 0400);
+MODULE_PARM_DESC(enable_slpc,
+ "Override single-loop-power-controller (slpc) usage. "
+ "(-1=auto [default], 0=disabled, 1=enabled)");
+
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 0ad020b..48978d0 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -39,6 +39,7 @@ struct i915_params {
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
+ int enable_slpc;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 949c016..a3dc08a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -321,6 +321,7 @@ static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
+ .has_slpc = 1,
};
static const struct intel_device_info intel_skylake_gt3_info = {
@@ -339,6 +340,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
+ .has_slpc = 1,
.has_pooled_eu = 0,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8bfde75..a1defe9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7016,6 +7016,7 @@ enum {
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
#define GEN9_FREQUENCY(x) ((x)<<23)
+#define GEN9_GET_FREQUENCY(x) ((x)>>23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a6e90fe..f01773a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -310,6 +310,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
+ if (intel_slpc_active(dev_priv))
+ dev_priv->rps.cur_freq = gen9_read_requested_freq(dev_priv);
+
ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -382,6 +385,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_freq_softlimit = val;
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ }
+
val = clamp_t(int, dev_priv->rps.cur_freq,
dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
@@ -446,6 +458,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_freq_softlimit = val;
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ intel_slpc_set_param(dev_priv,
+ SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ,
+ (u32) intel_gpu_freq(dev_priv, val));
+ }
+
val = clamp_t(int, dev_priv->rps.cur_freq,
dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 88e899b..38ef27d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13890,8 +13890,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_hw_done(state);
- if (intel_state->modeset)
+ if (intel_state->modeset) {
+ intel_slpc_update_atomic_commit_info(dev_priv, state);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+ }
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0757ee4..1ae427b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5090,6 +5090,8 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
dev_priv->drrs.refresh_rate_type = index;
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+
+ intel_slpc_update_display_rr_info(dev_priv, refresh_rate);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 98a5be4..aedbf8a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1673,6 +1673,19 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
+static inline int intel_slpc_active(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ int ret = 0;
+
+ if (guc_fw->guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+ return 0;
+
+ if (dev_priv->guc.slpc.shared_data_obj && dev_priv->guc.slpc.enabled)
+ ret = 1;
+
+ return ret;
+}
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -1684,6 +1697,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743..54cdf18 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -27,6 +27,7 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
+#include "intel_slpc.h"
struct drm_i915_gem_request;
@@ -146,8 +147,15 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
+
+ struct intel_slpc slpc;
};
+static inline int intel_slpc_enabled(void)
+{
+ return i915.enable_slpc;
+}
+
/* intel_guc_loader.c */
extern void intel_guc_init(struct drm_device *dev);
extern int intel_guc_setup(struct drm_device *dev);
@@ -157,10 +165,14 @@ extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);
/* i915_guc_submission.c */
+int i915_guc_action(struct intel_guc *guc, u32 *data, u32 len);
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
int i915_guc_submit(struct drm_i915_gem_request *rq);
+struct drm_i915_gem_object *
+i915_guc_allocate_gem_obj(struct drm_i915_private *dev_priv, u32 size);
+void i915_guc_release_gem_obj(struct drm_i915_gem_object *obj);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 4f6311a..76cefdb 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -129,6 +129,34 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
}
}
+static void sanitize_slpc_option(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+
+ /* Handle default case */
+ if (i915.enable_slpc < 0)
+ i915.enable_slpc = HAS_SLPC(dev);
+
+ /* slpc requires hardware support and compatible firmware */
+ if (!HAS_SLPC(dev))
+ i915.enable_slpc = 0;
+
+ /* slpc requires guc loaded */
+ if (!i915.enable_guc_loading)
+ i915.enable_slpc = 0;
+
+ /* slpc requires guc submission */
+ if (!i915.enable_guc_submission)
+ i915.enable_slpc = 0;
+
+ if ((IS_SKYLAKE(dev_priv) && (guc_fw->guc_fw_major_found != 6))
+ || (IS_BROXTON(dev_priv) && (guc_fw->guc_fw_major_found != 8))) {
+ DRM_INFO("SLPC not supported with current GuC firmware\n");
+ i915.enable_slpc = 0;
+ }
+}
+
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -172,6 +200,9 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
+ if (intel_slpc_enabled())
+ params[GUC_CTL_FEATURE] |= GUC_CTL_ENABLE_SLPC;
+
if (i915.guc_log_level >= 0) {
params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
@@ -707,18 +738,21 @@ void intel_guc_init(struct drm_device *dev)
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
- /* Early (and silent) return if GuC loading is disabled */
+ /* Return after sanitizing the SLPC option if GuC loading is disabled */
if (!i915.enable_guc_loading)
- return;
+ goto out;
if (fw_path == NULL)
- return;
+ goto out;
if (*fw_path == '\0')
- return;
+ goto out;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
guc_fw_fetch(dev, guc_fw);
/* status must now be FAIL or SUCCESS */
+
+out:
+ sanitize_slpc_option(dev);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 82c2efd..a79832b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4924,6 +4924,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ if (intel_slpc_active(dev_priv))
+ return;
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
valleyview_set_rps(dev_priv, val);
else
@@ -4938,7 +4941,13 @@ static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
- I915_WRITE(GEN6_RP_CONTROL, 0);
+ uint32_t rp_ctl = 0;
+
+ rp_ctl = I915_READ(GEN6_RP_CONTROL);
+
+ /* RP SW Mode Control will be needed for SLPC; hence not clearing. */
+ I915_WRITE(GEN6_RP_CONTROL, rp_ctl & GEN6_RP_MEDIA_MODE_MASK);
+ dev_priv->rps.enabled = false;
}
static void gen6_disable_rps(struct drm_i915_private *dev_priv)
@@ -4946,11 +4955,16 @@ static void gen6_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
+
+ dev_priv->rps.enabled = false;
+
}
static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ dev_priv->rps.enabled = false;
}
static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@@ -4962,6 +4976,8 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = false;
}
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
@@ -5084,7 +5100,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
return INTEL_RC6_ENABLE;
}
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
uint32_t rp_state_cap;
u32 ddcc_status = 0;
@@ -5189,6 +5205,8 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = true;
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@@ -5336,6 +5354,8 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = true;
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -5444,6 +5464,8 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = true;
}
static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -5966,6 +5988,8 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = true;
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
@@ -6055,6 +6079,8 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->rps.enabled = true;
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -6549,7 +6575,9 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_runtime_pm_get(dev_priv);
}
- if (IS_CHERRYVIEW(dev_priv))
+ if (intel_slpc_enabled())
+ intel_slpc_init(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_init_gt_powersave(dev_priv);
@@ -6557,7 +6585,9 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv))
+ if (intel_slpc_active(dev_priv))
+ intel_slpc_cleanup(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
return;
else if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
@@ -6586,15 +6616,22 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- gen6_suspend_rps(dev_priv);
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_suspend(dev_priv);
+ } else {
+ gen6_suspend_rps(dev_priv);
- /* Force GPU to min freq during suspend */
- gen6_rps_idle(dev_priv);
+ /* Force GPU to min freq during suspend */
+ gen6_rps_idle(dev_priv);
+ }
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (IS_IRONLAKE_M(dev_priv)) {
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_disable(dev_priv);
+ gen9_disable_rc6(dev_priv);
+ } else if (IS_IRONLAKE_M(dev_priv)) {
ironlake_disable_drps(dev_priv);
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
intel_suspend_gt_powersave(dev_priv);
@@ -6610,7 +6647,6 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
else
gen6_disable_rps(dev_priv);
- dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
@@ -6623,37 +6659,47 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- gen6_reset_rps_interrupts(dev_priv);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_enable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_enable_rps(dev_priv);
- } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ if (intel_slpc_enabled() &&
+ dev_priv->guc.slpc.shared_data_obj) {
gen9_enable_rc6(dev_priv);
- gen9_enable_rps(dev_priv);
+ intel_slpc_enable(dev_priv);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
__gen6_update_ring_freq(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- gen8_enable_rps(dev_priv);
- __gen6_update_ring_freq(dev_priv);
} else {
- gen6_enable_rps(dev_priv);
- __gen6_update_ring_freq(dev_priv);
- }
-
- WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+ gen6_reset_rps_interrupts(dev_priv);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_enable_rps(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ valleyview_enable_rps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_enable_rc6(dev_priv);
+ gen9_enable_rps(dev_priv);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ __gen6_update_ring_freq(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ gen8_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
+ } else {
+ gen6_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
+ }
- WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+ WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
- dev_priv->rps.enabled = true;
+ WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
- gen6_enable_rps_interrupts(dev_priv);
+ gen6_enable_rps_interrupts(dev_priv);
+ }
mutex_unlock(&dev_priv->rps.hw_lock);
+ /* Notify initial display mode information to SLPC */
+ if (intel_slpc_active(dev_priv))
+ intel_slpc_update_display_mode_info(dev_priv);
+
intel_runtime_pm_put(dev_priv);
}
@@ -6692,8 +6738,12 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- gen6_suspend_rps(dev_priv);
- dev_priv->rps.enabled = false;
+ if (intel_slpc_active(dev_priv)) {
+ intel_slpc_reset(dev_priv);
+ } else {
+ gen6_suspend_rps(dev_priv);
+ dev_priv->rps.enabled = false;
+ }
}
static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_slpc.c b/drivers/gpu/drm/i915/intel_slpc.c
new file mode 100644
index 0000000..2ec89b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_slpc.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include <asm/msr-index.h>
+#include "i915_drv.h"
+#include "intel_guc.h"
+
+static void host2guc_slpc(struct drm_i915_private *dev_priv, u32 *data, u32 len)
+{
+ int ret = i915_guc_action(&dev_priv->guc, data, len);
+
+ if (!ret) {
+ ret = I915_READ(SOFT_SCRATCH(1));
+ ret &= SLPC_EVENT_STATUS_MASK;
+ }
+
+ if (ret)
+ DRM_ERROR("event 0x%x status %d\n", (data[1] >> 8), ret);
+}
+
+static void host2guc_slpc_reset(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj = dev_priv->guc.slpc.shared_data_obj;
+ u32 data[4];
+ u64 shared_data_gtt_offset = i915_gem_obj_ggtt_offset(obj);
+
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_RESET, 2);
+ data[2] = lower_32_bits(shared_data_gtt_offset);
+ data[3] = upper_32_bits(shared_data_gtt_offset);
+
+ host2guc_slpc(dev_priv, data, 4);
+}
+
+static void host2guc_slpc_shutdown(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj = dev_priv->guc.slpc.shared_data_obj;
+ u32 data[4];
+ u64 shared_data_gtt_offset = i915_gem_obj_ggtt_offset(obj);
+
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2);
+ data[2] = lower_32_bits(shared_data_gtt_offset);
+ data[3] = upper_32_bits(shared_data_gtt_offset);
+
+ host2guc_slpc(dev_priv, data, 4);
+}
+
+static void host2guc_slpc_display_mode_change(struct drm_i915_private *dev_priv)
+{
+ u32 data[3 + SLPC_MAX_NUM_OF_PIPES];
+ int i;
+ struct intel_slpc_display_mode_event_params *display_mode_params;
+
+ display_mode_params = &dev_priv->guc.slpc.display_mode_params;
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_DISPLAY_MODE_CHANGE,
+ SLPC_MAX_NUM_OF_PIPES + 1);
+ data[2] = display_mode_params->global_data;
+ for(i = 0; i < SLPC_MAX_NUM_OF_PIPES; ++i)
+ data[3+i] = display_mode_params->per_pipe_info[i].data;
+
+ host2guc_slpc(dev_priv, data, 3 + SLPC_MAX_NUM_OF_PIPES);
+}
+
+static void host2guc_slpc_set_param(struct drm_i915_private *dev_priv,
+ enum slpc_param_id id, u32 value)
+{
+ u32 data[4];
+
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2);
+ data[2] = (u32) id;
+ data[3] = value;
+
+ host2guc_slpc(dev_priv, data, 4);
+}
+
+static void host2guc_slpc_unset_param(struct drm_i915_private *dev_priv,
+ enum slpc_param_id id)
+{
+ u32 data[3];
+
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1);
+ data[2] = (u32) id;
+
+ host2guc_slpc(dev_priv, data, 3);
+}
+
+static void host2guc_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj = dev_priv->guc.slpc.shared_data_obj;
+ u32 data[4];
+ u64 shared_data_gtt_offset = i915_gem_obj_ggtt_offset(obj);
+
+ data[0] = HOST2GUC_ACTION_SLPC_REQUEST;
+ data[1] = SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2);
+ data[2] = lower_32_bits(shared_data_gtt_offset);
+ data[3] = upper_32_bits(shared_data_gtt_offset);
+
+ host2guc_slpc(dev_priv, data, 4);
+}
+
+static u8 slpc_get_platform_sku(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ enum slpc_platform_sku platform_sku;
+
+ if (IS_SKL_ULX(dev))
+ platform_sku = SLPC_PLATFORM_SKU_ULX;
+ else if (IS_SKL_ULT(dev))
+ platform_sku = SLPC_PLATFORM_SKU_ULT;
+ else
+ platform_sku = SLPC_PLATFORM_SKU_DT;
+
+ return (u8) platform_sku;
+}
+
+static u8 slpc_get_slice_count(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ u8 slice_count = 1;
+
+ if (IS_SKYLAKE(dev))
+ slice_count = INTEL_INFO(dev)->slice_total;
+
+ return slice_count;
+}
+
+static void slpc_shared_data_init(struct drm_i915_gem_object *obj)
+{
+ struct page *page;
+ struct slpc_shared_data *data;
+ u64 msr_value;
+
+ page = i915_gem_object_get_page(obj, 0);
+ if (page) {
+ data = kmap_atomic(page);
+ memset(data, 0, sizeof(struct slpc_shared_data));
+
+ data->slpc_version = SLPC_VERSION;
+ data->shared_data_size = sizeof(struct slpc_shared_data);
+ data->global_state = (u32) SLPC_GLOBAL_STATE_NOT_RUNNING;
+ data->platform_info.platform_sku = slpc_get_platform_sku(obj);
+ data->platform_info.slice_count = slpc_get_slice_count(obj);
+ data->platform_info.host_os = (u8) SLPC_HOST_OS_WINDOWS_8;
+ data->platform_info.power_plan_source =
+ (u8) SLPC_POWER_PLAN_SOURCE(SLPC_POWER_PLAN_BALANCED,
+ SLPC_POWER_SOURCE_AC);
+ rdmsrl(MSR_TURBO_RATIO_LIMIT, msr_value);
+ data->platform_info.P0_freq = (u8) msr_value;
+ rdmsrl(MSR_PLATFORM_INFO, msr_value);
+ data->platform_info.P1_freq = (u8) (msr_value >> 8);
+ data->platform_info.Pe_freq = (u8) (msr_value >> 40);
+ data->platform_info.Pn_freq = (u8) (msr_value >> 48);
+ rdmsrl(MSR_PKG_POWER_LIMIT, msr_value);
+ data->platform_info.package_rapl_limit_high =
+ (u32) (msr_value >> 32);
+ data->platform_info.package_rapl_limit_low = (u32) msr_value;
+
+ kunmap_atomic(data);
+ }
+}
+
+void intel_slpc_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+
+ /* Allocate shared data structure */
+ obj = dev_priv->guc.slpc.shared_data_obj;
+ if (!obj) {
+ mutex_lock(&dev->struct_mutex);
+ obj = i915_guc_allocate_gem_obj(dev_priv,
+ PAGE_ALIGN(sizeof(struct slpc_shared_data)));
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->guc.slpc.shared_data_obj = obj;
+ WARN_ON(upper_32_bits(i915_gem_obj_ggtt_offset(obj)) != 0);
+ }
+
+ if (!obj)
+ DRM_ERROR("slpc_shared_data allocation failed\n");
+ else
+ slpc_shared_data_init(obj);
+}
+
+void intel_slpc_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Release shared data structure */
+ mutex_lock(&dev->struct_mutex);
+ i915_guc_release_gem_obj(dev_priv->guc.slpc.shared_data_obj);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->guc.slpc.shared_data_obj = NULL;
+}
+
+void intel_slpc_suspend(struct drm_i915_private *dev_priv)
+{
+ host2guc_slpc_shutdown(dev_priv);
+ dev_priv->guc.slpc.enabled = false;
+}
+
+void intel_slpc_disable(struct drm_i915_private *dev_priv)
+{
+ host2guc_slpc_shutdown(dev_priv);
+ dev_priv->guc.slpc.enabled = false;
+}
+
+void intel_slpc_enable(struct drm_i915_private *dev_priv)
+{
+ /* Initialize the RPS frequency values */
+ gen6_init_rps_frequencies(dev_priv);
+
+ host2guc_slpc_reset(dev_priv);
+ dev_priv->guc.slpc.enabled = true;
+
+ return;
+}
+
+void intel_slpc_reset(struct drm_i915_private *dev_priv)
+{
+ host2guc_slpc_shutdown(dev_priv);
+ dev_priv->guc.slpc.enabled = false;
+}
+
+void intel_slpc_update_display_mode_info(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_display_pipe_info *per_pipe_info;
+ struct intel_slpc_display_mode_event_params *cur_params, old_params;
+ bool notify = false;
+
+ if (!intel_slpc_active(dev_priv))
+ return;
+
+ /* Copy display mode parameters for comparison */
+ cur_params = &dev_priv->guc.slpc.display_mode_params;
+ old_params.global_data = cur_params->global_data;
+ cur_params->global_data = 0;
+
+ intel_runtime_pm_get(dev_priv);
+ drm_modeset_lock_all(dev_priv->dev);
+
+ for_each_intel_crtc(dev_priv->dev, intel_crtc) {
+ per_pipe_info = &cur_params->per_pipe_info[intel_crtc->pipe];
+ old_params.per_pipe_info[intel_crtc->pipe].data =
+ per_pipe_info->data;
+ per_pipe_info->data = 0;
+
+ if (intel_crtc->active) {
+ struct drm_display_mode *mode = &intel_crtc->base.mode;
+
+ if (mode->clock == 0 || mode->htotal == 0 ||
+ mode->vtotal == 0) {
+ DRM_DEBUG_DRIVER(
+ "Display Mode Info not sent to SLPC\n");
+ drm_modeset_unlock_all(dev_priv->dev);
+ intel_runtime_pm_put(dev_priv);
+ return;
+ }
+ /* FIXME: Update is_widi based on encoder */
+ per_pipe_info->is_widi = 0;
+ per_pipe_info->refresh_rate =
+ (u32)(((u64)mode->clock * 1000) /
+ ((u64)mode->htotal * (u64)mode->vtotal));
+ per_pipe_info->vsync_ft_usec =
+ (u32)(((u64)mode->htotal * (u64)mode->vtotal * 1000) /
+ (u64)mode->clock);
+
+ cur_params->active_pipes_bitmask |=
+ (1 << intel_crtc->pipe);
+ cur_params->vbi_sync_on_pipes |=
+ (1 << intel_crtc->pipe);
+ } else {
+ cur_params->active_pipes_bitmask &=
+ ~(1 << intel_crtc->pipe);
+ cur_params->vbi_sync_on_pipes &=
+ ~(1 << intel_crtc->pipe);
+ }
+
+ if (old_params.per_pipe_info[intel_crtc->pipe].data !=
+ per_pipe_info->data)
+ notify = true;
+ }
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
+ cur_params->num_active_pipes =
+ hweight32(cur_params->active_pipes_bitmask);
+
+ /*
+ * Compare old display mode with current mode.
+ * Notify SLPC if it is changed.
+ */
+ if (cur_params->global_data != old_params.global_data)
+ notify = true;
+
+ if (notify)
+ host2guc_slpc_display_mode_change(dev_priv);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+void intel_slpc_update_atomic_commit_info(struct drm_i915_private *dev_priv,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct intel_display_pipe_info *per_pipe_info;
+ struct intel_slpc_display_mode_event_params *cur_params, old_params;
+ bool notify = false;
+ int i;
+
+ if (!intel_slpc_active(dev_priv))
+ return;
+
+ /* Copy display mode parameters for comparison */
+ cur_params = &dev_priv->guc.slpc.display_mode_params;
+ old_params.global_data = cur_params->global_data;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ per_pipe_info = &cur_params->per_pipe_info[intel_crtc->pipe];
+ old_params.per_pipe_info[intel_crtc->pipe].data =
+ per_pipe_info->data;
+
+ per_pipe_info->data = 0;
+ cur_params->active_pipes_bitmask &=
+ ~(1 << intel_crtc->pipe);
+ cur_params->vbi_sync_on_pipes &=
+ ~(1 << intel_crtc->pipe);
+
+ if (crtc_state->active) {
+ struct drm_display_mode *mode = &crtc->mode;
+
+ if (mode->clock == 0 || mode->htotal == 0 ||
+ mode->vtotal == 0) {
+ DRM_DEBUG_DRIVER(
+ "Display Mode Info not sent to SLPC\n");
+ return;
+ }
+
+ /* FIXME: Update is_widi based on encoder */
+ per_pipe_info->is_widi = 0;
+ per_pipe_info->refresh_rate =
+ (u32)(((u64)mode->clock * 1000) /
+ ((u64)mode->htotal * (u64)mode->vtotal));
+ per_pipe_info->vsync_ft_usec =
+ (u32)(((u64)mode->htotal * (u64)mode->vtotal * 1000) /
+ (u64)mode->clock);
+
+ cur_params->active_pipes_bitmask |=
+ (1 << intel_crtc->pipe);
+ cur_params->vbi_sync_on_pipes |=
+ (1 << intel_crtc->pipe);
+ }
+
+ if (old_params.per_pipe_info[intel_crtc->pipe].data !=
+ per_pipe_info->data)
+ notify = true;
+ }
+
+ cur_params->num_active_pipes =
+ hweight32(cur_params->active_pipes_bitmask);
+
+ /*
+ * Compare old display mode with current mode.
+ * Notify SLPC if it is changed.
+ */
+ if (cur_params->global_data != old_params.global_data)
+ notify = true;
+
+ if (notify)
+ host2guc_slpc_display_mode_change(dev_priv);
+}
+
+void intel_slpc_update_display_rr_info(struct drm_i915_private *dev_priv, u32 refresh_rate)
+{
+ struct drm_crtc *crtc;
+ struct intel_display_pipe_info *per_pipe_info;
+ struct intel_slpc_display_mode_event_params *display_params;
+
+ if (!intel_slpc_active(dev_priv))
+ return;
+
+ if (!refresh_rate)
+ return;
+
+ display_params = &dev_priv->guc.slpc.display_mode_params;
+ crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+
+ per_pipe_info = &display_params->per_pipe_info[to_intel_crtc(crtc)->pipe];
+ per_pipe_info->refresh_rate = refresh_rate;
+ per_pipe_info->vsync_ft_usec = 1000000 / refresh_rate;
+
+ host2guc_slpc_display_mode_change(dev_priv);
+}
+
+void intel_slpc_unset_param(struct drm_i915_private *dev_priv, enum slpc_param_id id)
+{
+ struct drm_i915_gem_object *obj;
+ struct page *page;
+ struct slpc_shared_data *data = NULL;
+
+ obj = dev_priv->guc.slpc.shared_data_obj;
+ if (obj) {
+ page = i915_gem_object_get_page(obj, 0);
+ if (page)
+ data = kmap_atomic(page);
+ }
+
+ if (data) {
+ data->override_parameters_set_bits[id >> 5]
+ &= (~(1 << (id % 32)));
+ data->override_parameters_values[id] = 0;
+ kunmap_atomic(data);
+
+ host2guc_slpc_unset_param(dev_priv, id);
+ }
+}
+
+void intel_slpc_set_param(struct drm_i915_private *dev_priv, enum slpc_param_id id,
+ u32 value)
+{
+ struct drm_i915_gem_object *obj;
+ struct page *page;
+ struct slpc_shared_data *data = NULL;
+
+ obj = dev_priv->guc.slpc.shared_data_obj;
+ if (obj) {
+ page = i915_gem_object_get_page(obj, 0);
+ if (page)
+ data = kmap_atomic(page);
+ }
+
+ if (data) {
+ data->override_parameters_set_bits[id >> 5]
+ |= (1 << (id % 32));
+ data->override_parameters_values[id] = value;
+ kunmap_atomic(data);
+
+ host2guc_slpc_set_param(dev_priv, id, value);
+ }
+}
+
+void intel_slpc_get_param(struct drm_i915_private *dev_priv, enum slpc_param_id id,
+ int *overriding, u32 *value)
+{
+ struct drm_i915_gem_object *obj;
+ struct page *page;
+ struct slpc_shared_data *data = NULL;
+ u32 bits;
+
+ obj = dev_priv->guc.slpc.shared_data_obj;
+ if (obj) {
+ page = i915_gem_object_get_page(obj, 0);
+ if (page)
+ data = kmap_atomic(page);
+ }
+
+ if (data) {
+ if (overriding) {
+ bits = data->override_parameters_set_bits[id >> 5];
+ *overriding = (0 != (bits & (1 << (id % 32))));
+ }
+ if (value)
+ *value = data->override_parameters_values[id];
+
+ kunmap_atomic(data);
+ }
+}
+
+void intel_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+ if (intel_slpc_active(dev_priv))
+ host2guc_slpc_query_task_state(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/intel_slpc.h b/drivers/gpu/drm/i915/intel_slpc.h
new file mode 100644
index 0000000..12d15e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_slpc.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _INTEL_SLPC_H_
+#define _INTEL_SLPC_H_
+
+#define SLPC_MAJOR_VER 2
+#define SLPC_MINOR_VER 4
+#define SLPC_VERSION ((2015 << 16) | (SLPC_MAJOR_VER << 8) | (SLPC_MINOR_VER))
+
+enum slpc_status {
+ SLPC_STATUS_OK = 0,
+ SLPC_STATUS_ERROR = 1,
+ SLPC_STATUS_ILLEGAL_COMMAND = 2,
+ SLPC_STATUS_INVALID_ARGS = 3,
+ SLPC_STATUS_INVALID_PARAMS = 4,
+ SLPC_STATUS_INVALID_DATA = 5,
+ SLPC_STATUS_OUT_OF_RANGE = 6,
+ SLPC_STATUS_NOT_SUPPORTED = 7,
+ SLPC_STATUS_NOT_IMPLEMENTED = 8,
+ SLPC_STATUS_NO_DATA = 9,
+ SLPC_STATUS_EVENT_NOT_REGISTERED = 10,
+ SLPC_STATUS_REGISTER_LOCKED = 11,
+ SLPC_STATUS_TEMPORARILY_UNAVAILABLE = 12,
+ SLPC_STATUS_VALUE_ALREADY_SET = 13,
+ SLPC_STATUS_VALUE_ALREADY_UNSET = 14,
+ SLPC_STATUS_VALUE_NOT_CHANGED = 15,
+ SLPC_STATUS_MISMATCHING_VERSION = 16,
+ SLPC_STATUS_MEMIO_ERROR = 17,
+ SLPC_STATUS_EVENT_QUEUED_REQ_DPC = 18,
+ SLPC_STATUS_EVENT_QUEUED_NOREQ_DPC = 19,
+ SLPC_STATUS_NO_EVENT_QUEUED = 20,
+ SLPC_STATUS_OUT_OF_SPACE = 21,
+ SLPC_STATUS_TIMEOUT = 22,
+ SLPC_STATUS_NO_LOCK = 23,
+};
+
+enum slpc_event_id {
+ SLPC_EVENT_RESET = 0,
+ SLPC_EVENT_SHUTDOWN = 1,
+ SLPC_EVENT_PLATFORM_INFO_CHANGE = 2,
+ SLPC_EVENT_DISPLAY_MODE_CHANGE = 3,
+ SLPC_EVENT_FLIP_COMPLETE = 4,
+ SLPC_EVENT_QUERY_TASK_STATE = 5,
+ SLPC_EVENT_PARAMETER_SET = 6,
+ SLPC_EVENT_PARAMETER_UNSET = 7,
+};
+
+#define SLPC_EVENT(id, argc) ((u32) (id) << 8 | (argc))
+#define SLPC_EVENT_STATUS_MASK 0xFF
+
+enum slpc_param_id {
+ SLPC_PARAM_TASK_ENABLE_GTPERF = 0,
+ SLPC_PARAM_TASK_DISABLE_GTPERF = 1,
+ SLPC_PARAM_TASK_ENABLE_BALANCER = 2,
+ SLPC_PARAM_TASK_DISABLE_BALANCER = 3,
+ SLPC_PARAM_TASK_ENABLE_DCC = 4,
+ SLPC_PARAM_TASK_DISABLE_DCC = 5,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ = 6,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ = 7,
+ SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ = 8,
+ SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ = 9,
+ SLPC_PARAM_DFPS_THRESHOLD_MAX_FPS = 10,
+ SLPC_PARAM_GLOBAL_DISABLE_GT_FREQ_MANAGEMENT = 11,
+ SLPC_PARAM_DFPS_DISABLE_FRAMERATE_STALLING = 12,
+ SLPC_PARAM_GLOBAL_DISABLE_RC6_MODE_CHANGE = 13,
+ SLPC_PARAM_GLOBAL_OC_UNSLICE_FREQ_MHZ = 14,
+ SLPC_PARAM_GLOBAL_OC_SLICE_FREQ_MHZ = 15,
+ SLPC_PARAM_GLOBAL_DISABE_IA_GT_BALANCING = 16,
+};
+
+#define SLPC_PARAM_TASK_DEFAULT 0
+#define SLPC_PARAM_TASK_ENABLED 1
+#define SLPC_PARAM_TASK_DISABLED 2
+#define SLPC_PARAM_TASK_UNKNOWN 3
+
+enum slpc_global_state {
+ SLPC_GLOBAL_STATE_NOT_RUNNING = 0,
+ SLPC_GLOBAL_STATE_INITIALIZING = 1,
+ SLPC_GLOBAL_STATE_RESETTING = 2,
+ SLPC_GLOBAL_STATE_RUNNING = 3,
+ SLPC_GLOBAL_STATE_SHUTTING_DOWN = 4,
+ SLPC_GLOBAL_STATE_ERROR = 5
+};
+
+enum slpc_host_os {
+ SLPC_HOST_OS_UNDEFINED = 0,
+ SLPC_HOST_OS_WINDOWS_8 = 1,
+};
+
+enum slpc_platform_sku {
+ SLPC_PLATFORM_SKU_UNDEFINED = 0,
+ SLPC_PLATFORM_SKU_ULX = 1,
+ SLPC_PLATFORM_SKU_ULT = 2,
+ SLPC_PLATFORM_SKU_T = 3,
+ SLPC_PLATFORM_SKU_MOBL = 4,
+ SLPC_PLATFORM_SKU_DT = 5,
+ SLPC_PLATFORM_SKU_UNKNOWN = 6,
+};
+
+enum slpc_power_plan {
+ SLPC_POWER_PLAN_UNDEFINED = 0,
+ SLPC_POWER_PLAN_BATTERY_SAVER = 1,
+ SLPC_POWER_PLAN_BALANCED = 2,
+ SLPC_POWER_PLAN_PERFORMANCE = 3,
+ SLPC_POWER_PLAN_UNKNOWN = 4,
+};
+
+enum slpc_power_source {
+ SLPC_POWER_SOURCE_UNDEFINED = 0,
+ SLPC_POWER_SOURCE_AC = 1,
+ SLPC_POWER_SOURCE_DC = 2,
+ SLPC_POWER_SOURCE_UNKNOWN = 3,
+};
+
+#define SLPC_POWER_PLAN_SOURCE(plan, source) ((plan) | ((source) << 6))
+#define SLPC_POWER_PLAN(plan_source) ((plan_source) & 0x3F)
+#define SLPC_POWER_SOURCE(plan_source) ((plan_source) >> 6)
+
+struct slpc_platform_info {
+ u8 platform_sku;
+ u8 slice_count;
+ u8 host_os;
+ u8 power_plan_source;
+ u8 P0_freq;
+ u8 P1_freq;
+ u8 Pe_freq;
+ u8 Pn_freq;
+ u32 package_rapl_limit_high;
+ u32 package_rapl_limit_low;
+} __packed;
+
+#define SLPC_MAX_OVERRIDE_PARAMETERS 192
+#define SLPC_OVERRIDE_BITFIELD_SIZE ((SLPC_MAX_OVERRIDE_PARAMETERS + 31) / 32)
+
+struct slpc_shared_data {
+ u32 slpc_version;
+ u32 shared_data_size;
+ u32 global_state;
+ struct slpc_platform_info platform_info;
+ u32 task_state_data;
+ u32 override_parameters_set_bits[SLPC_OVERRIDE_BITFIELD_SIZE];
+ u32 override_parameters_values[SLPC_MAX_OVERRIDE_PARAMETERS];
+} __packed;
+
+#define SLPC_MAX_NUM_OF_PIPES 4
+
+struct intel_display_pipe_info {
+ union {
+ u32 data;
+ struct {
+ u32 is_widi:1;
+ u32 refresh_rate:7;
+ u32 vsync_ft_usec:24;
+ };
+ };
+} __packed;
+
+struct intel_slpc_display_mode_event_params {
+ struct {
+ struct intel_display_pipe_info
+ per_pipe_info[SLPC_MAX_NUM_OF_PIPES];
+ union {
+ u32 global_data;
+ struct {
+ u32 active_pipes_bitmask:SLPC_MAX_NUM_OF_PIPES;
+ u32 fullscreen_pipes:SLPC_MAX_NUM_OF_PIPES;
+ u32 vbi_sync_on_pipes:SLPC_MAX_NUM_OF_PIPES;
+ u32 num_active_pipes:2;
+ };
+ };
+ };
+} __packed;
+
+struct intel_slpc {
+ struct drm_i915_gem_object *shared_data_obj;
+ bool enabled;
+ struct intel_slpc_display_mode_event_params display_mode_params;
+};
+
+/* intel_slpc.c */
+void intel_slpc_init(struct drm_i915_private *dev_priv);
+void intel_slpc_cleanup(struct drm_i915_private *dev_priv);
+void intel_slpc_suspend(struct drm_i915_private *dev_priv);
+void intel_slpc_disable(struct drm_i915_private *dev_priv);
+void intel_slpc_enable(struct drm_i915_private *dev_priv);
+void intel_slpc_reset(struct drm_i915_private *dev_priv);
+void intel_slpc_update_display_mode_info(struct drm_i915_private *dev_priv);
+void intel_slpc_update_atomic_commit_info(struct drm_i915_private *dev_priv,
+ struct drm_atomic_state *state);
+void intel_slpc_update_display_rr_info(struct drm_i915_private *dev_priv, u32 refresh_rate);
+
+void intel_slpc_unset_param(struct drm_i915_private *dev_priv, enum slpc_param_id id);
+void intel_slpc_set_param(struct drm_i915_private *dev_priv, enum slpc_param_id id,
+ u32 value);
+void intel_slpc_get_param(struct drm_i915_private *dev_priv, enum slpc_param_id id,
+ int *overriding, u32 *value);
+void intel_slpc_query_task_state(struct drm_i915_private *dev_priv);
+#endif
--
1.9.1
More information about the Intel-gfx-trybot
mailing list