[PATCH 1/4] drm/i915/guc: Introduce GuC buffer object

Jackie Li <yaodong.li@intel.com>
Fri Mar 2 23:59:05 UTC 2018


Gfx buffers for GuC need to be allocated above the GuC WOPCM top.
Currently, we have a dedicated function that wraps the vma pin operation
to enforce the related rules. However, due to the lack of a GuC-specific
buffer abstraction, the driver has no idea whether a vma in use is GuC
related, which leads to unnecessary checks such as calling
guc_ggtt_offset() at every place where the vma offset is needed. Another
problem is that most GuC objects (e.g. stage_desc_pool, shared_data, ct)
follow the same logic to pin/map the GEM object, which introduces
duplicated code.

This patch introduces the GuC buffer object (guc_bo), which is
essentially a wrapper around an i915_vma object with additional fields
to store the verified ggtt_offset and the mapped CPU virtual address.
It also provides centralized GuC BO allocation APIs and updates the GuC
code to allocate GuC GEM buffers through the new API. This is purely a
code reorganization and does not change any of the current logic.
However, the GuC BO concept and related API will facilitate future
changes such as enforcing set_domain calls. Following patches will apply
the same abstraction to other GuC memory pin/map operations, such as FW
DMA.
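
A minimal usage sketch of the new API, modelled on the shared_data
conversion in the diff below (the surrounding code, including the data[]
and desc users, is abbreviated and illustrative only):

	struct intel_guc_bo *guc_bo;

	/* allocate, pin above GUC_WOPCM_TOP and CPU map with I915_MAP_WB */
	guc_bo = intel_guc_bo_alloc(guc, PAGE_SIZE, true);
	if (IS_ERR(guc_bo))
		return PTR_ERR(guc_bo);

	/* verified GGTT offset and CPU mapping are cached in the BO */
	data[2] = guc_bo->ggtt_offset;
	desc = guc_bo->cpu_vaddr;

	/* unmap, unpin and release the underlying GEM object */
	intel_guc_bo_free(guc_bo);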

Signed-off-by: Jackie Li <yaodong.li@intel.com>
---
 drivers/gpu/drm/i915/Makefile               |   1 +
 drivers/gpu/drm/i915/i915_debugfs.c         |  10 +--
 drivers/gpu/drm/i915/i915_gpu_error.c       |   3 +-
 drivers/gpu/drm/i915/intel_guc.c            |  75 ++++-----------------
 drivers/gpu/drm/i915/intel_guc.h            |  10 ++-
 drivers/gpu/drm/i915/intel_guc_ads.c        |  18 ++---
 drivers/gpu/drm/i915/intel_guc_bo.c         | 100 ++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_guc_bo.h         |  34 ++++++++++
 drivers/gpu/drm/i915/intel_guc_ct.c         |  41 +++++-------
 drivers/gpu/drm/i915/intel_guc_ct.h         |   2 +-
 drivers/gpu/drm/i915/intel_guc_log.c        |  35 +++++-----
 drivers/gpu/drm/i915/intel_guc_log.h        |   2 +-
 drivers/gpu/drm/i915/intel_guc_submission.c |  73 ++++++++------------
 drivers/gpu/drm/i915/intel_guc_submission.h |   3 +-
 drivers/gpu/drm/i915/intel_uc.c             |   4 +-
 15 files changed, 234 insertions(+), 177 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_guc_bo.c
 create mode 100644 drivers/gpu/drm/i915/intel_guc_bo.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1bd9bc5..b78c28b0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -83,6 +83,7 @@ i915-y += i915_cmd_parser.o \
 # general-purpose microcontroller (GuC) support
 i915-y += intel_uc.o \
 	  intel_uc_fw.o \
+	  intel_guc_bo.o \
 	  intel_guc.o \
 	  intel_guc_ads.o \
 	  intel_guc_ct.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e838c76..4471f78 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2407,14 +2407,16 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const struct intel_guc *guc = &dev_priv->guc;
-	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
 	struct intel_guc_client *client = guc->execbuf_client;
+	struct guc_stage_desc *desc;
 	unsigned int tmp;
 	int index;
 
 	if (!USES_GUC_SUBMISSION(dev_priv))
 		return -ENODEV;
 
+	desc = guc->stage_desc_pool->cpu_vaddr;
+
 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
 		struct intel_engine_cs *engine;
 
@@ -2471,8 +2473,8 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 
 	if (dump_load_err)
 		obj = dev_priv->guc.load_err_log;
-	else if (dev_priv->guc.log.vma)
-		obj = dev_priv->guc.log.vma->obj;
+	else if (dev_priv->guc.log.bo)
+		obj = dev_priv->guc.log.bo->vma->obj;
 
 	if (!obj)
 		return 0;
@@ -2503,7 +2505,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
 	if (!HAS_GUC(dev_priv))
 		return -ENODEV;
 
-	if (!dev_priv->guc.log.vma)
+	if (!dev_priv->guc.log.bo)
 		return -EINVAL;
 
 	*val = i915_modparams.guc_log_level;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a7933c9..c2d361d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1622,7 +1622,8 @@ static void capture_uc_state(struct i915_gpu_state *error)
 	 */
 	error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
 	error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
-	error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
+	error_uc->guc_log = i915_error_object_create(i915,
+						     i915->guc.log.bo->vma);
 }
 
 /* Capture all registers which don't fit into another category. */
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index e6512cc..151bba1 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -134,29 +134,20 @@ void intel_guc_fini_wq(struct intel_guc *guc)
 
 static int guc_shared_data_create(struct intel_guc *guc)
 {
-	struct i915_vma *vma;
-	void *vaddr;
+	struct intel_guc_bo *guc_bo;
 
-	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	guc_bo = intel_guc_bo_alloc(guc, PAGE_SIZE, true);
+	if (IS_ERR(guc_bo))
+		return PTR_ERR(guc_bo);
 
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		i915_vma_unpin_and_release(&vma);
-		return PTR_ERR(vaddr);
-	}
-
-	guc->shared_data = vma;
-	guc->shared_data_vaddr = vaddr;
+	guc->shared_data = guc_bo;
 
 	return 0;
 }
 
 static void guc_shared_data_destroy(struct intel_guc *guc)
 {
-	i915_gem_object_unpin_map(guc->shared_data->obj);
-	i915_vma_unpin_and_release(&guc->shared_data);
+	intel_guc_bo_free(guc->shared_data);
 }
 
 int intel_guc_init(struct intel_guc *guc)
@@ -176,7 +167,7 @@ int intel_guc_init(struct intel_guc *guc)
 	ret = intel_guc_ads_create(guc);
 	if (ret)
 		goto err_log;
-	GEM_BUG_ON(!guc->ads_vma);
+	GEM_BUG_ON(!guc->ads);
 
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
@@ -269,8 +260,8 @@ void intel_guc_init_params(struct intel_guc *guc)
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (USES_GUC_SUBMISSION(dev_priv)) {
-		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
-		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+		u32 ads = guc->ads->ggtt_offset >> PAGE_SHIFT;
+		u32 pgs = guc->stage_desc_pool->ggtt_offset;
 		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
 
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
@@ -418,7 +409,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
 	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
-	data[2] = guc_ggtt_offset(guc->shared_data);
+	data[2] = guc->shared_data->ggtt_offset;
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
@@ -441,7 +432,7 @@ int intel_guc_reset_engine(struct intel_guc *guc,
 	data[3] = 0;
 	data[4] = 0;
 	data[5] = guc->execbuf_client->stage_id;
-	data[6] = guc_ggtt_offset(guc->shared_data);
+	data[6] = guc->shared_data->ggtt_offset;
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
@@ -463,53 +454,11 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
 
 	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
-	data[2] = guc_ggtt_offset(guc->shared_data);
+	data[2] = guc->shared_data->ggtt_offset;
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
 
-/**
- * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
- * @guc:	the guc
- * @size:	size of area to allocate (both virtual space and memory)
- *
- * This is a wrapper to create an object for use with the GuC. In order to
- * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
- * both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
- * range is reserved inside GuC.
- *
- * Return:	A i915_vma if successful, otherwise an ERR_PTR.
- */
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	obj = i915_gem_object_create(dev_priv, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
-	if (IS_ERR(vma))
-		goto err;
-
-	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
-			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-	if (ret) {
-		vma = ERR_PTR(ret);
-		goto err;
-	}
-
-	return vma;
-
-err:
-	i915_gem_object_put(obj);
-	return vma;
-}
-
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
 {
 	u32 wopcm_size = GUC_WOPCM_TOP;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 52856a9..adf03b7 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -32,6 +32,7 @@
 #include "intel_guc_log.h"
 #include "intel_guc_reg.h"
 #include "intel_uc_fw.h"
+#include "intel_guc_bo.h"
 #include "i915_vma.h"
 
 struct guc_preempt_work {
@@ -55,12 +56,10 @@ struct intel_guc {
 	/* intel_guc_recv interrupt related state */
 	bool interrupts_enabled;
 
-	struct i915_vma *ads_vma;
-	struct i915_vma *stage_desc_pool;
-	void *stage_desc_pool_vaddr;
+	struct intel_guc_bo *ads;
+	struct intel_guc_bo *stage_desc_pool;
 	struct ida stage_ids;
-	struct i915_vma *shared_data;
-	void *shared_data_vaddr;
+	struct intel_guc_bo *shared_data;
 
 	struct intel_guc_client *execbuf_client;
 	struct intel_guc_client *preempt_client;
@@ -129,7 +128,6 @@ int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct drm_i915_private *dev_priv);
 int intel_guc_resume(struct drm_i915_private *dev_priv);
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index ac62753..6587185 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -75,7 +75,7 @@ static void guc_policies_init(struct guc_policies *policies)
 int intel_guc_ads_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct i915_vma *vma;
+	struct intel_guc_bo *ads_bo;
 	struct page *page;
 	/* The ads obj includes the struct itself and buffers passed to GuC */
 	struct {
@@ -90,15 +90,15 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
 	u32 base;
 
-	GEM_BUG_ON(guc->ads_vma);
+	GEM_BUG_ON(guc->ads);
 
-	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	ads_bo = intel_guc_bo_alloc(guc, PAGE_ALIGN(sizeof(*blob)), false);
+	if (IS_ERR(ads_bo))
+		return PTR_ERR(ads_bo);
 
-	guc->ads_vma = vma;
+	guc->ads = ads_bo;
 
-	page = i915_vma_first_page(vma);
+	page = i915_vma_first_page(ads_bo->vma);
 	blob = kmap(page);
 
 	/* GuC scheduling policies */
@@ -135,7 +135,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 		blob->ads.eng_state_size[engine->guc_id] =
 			engine->context_size - skipped_size;
 
-	base = guc_ggtt_offset(vma);
+	base = ads_bo->ggtt_offset;
 	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
 	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
 	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
@@ -147,5 +147,5 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
 void intel_guc_ads_destroy(struct intel_guc *guc)
 {
-	i915_vma_unpin_and_release(&guc->ads_vma);
+	intel_guc_bo_free(guc->ads);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_bo.c b/drivers/gpu/drm/i915/intel_guc_bo.c
new file mode 100644
index 0000000..e7bd5c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_bo.c
@@ -0,0 +1,100 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_guc_bo.h"
+#include "intel_guc.h"
+#include "i915_drv.h"
+
+/**
+ * intel_guc_bo_alloc() - Allocate an intel_guc_bo object.
+ * @guc: intel_guc structure.
+ * @size: size in bytes of the GuC buffer object.
+ * @cpu_map: whether to also pin and map this object into CPU address space.
+ *
+ * GuC hardware requires an i915 GEM object to be pinned above GUC_WOPCM_TOP.
+ * This function is a wrapper that allocates an i915 GEM object and pins it
+ * into GGTT address space, enforcing the GuC hardware requirement through
+ * the GGTT offset bias used while pinning the buffer.
+ * If @cpu_map is true, the GEM object is also pinned and mapped into CPU
+ * address space with the I915_MAP_WB cache policy.
+ *
+ * Return: A pointer to the new intel_guc_bo on success, an ERR_PTR on failure.
+ */
+struct intel_guc_bo *intel_guc_bo_alloc(struct intel_guc *guc, u32 size,
+					bool cpu_map)
+{
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	struct intel_guc_bo *guc_bo;
+	void *err_ptr;
+	int err;
+
+	guc_bo = kzalloc(sizeof(*guc_bo), GFP_KERNEL);
+	if (!guc_bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = i915_gem_object_create(i915, size);
+	if (IS_ERR(obj)) {
+		err_ptr = ERR_CAST(obj);
+		goto obj_err;
+	}
+
+	guc_bo->vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+	if (IS_ERR(guc_bo->vma)) {
+		err_ptr = ERR_CAST(guc_bo->vma);
+		goto vma_err;
+	}
+
+	err = i915_vma_pin(guc_bo->vma, 0, PAGE_SIZE,
+			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (err) {
+		err_ptr = ERR_PTR(err);
+		goto pin_err;
+	}
+
+	if (cpu_map) {
+		guc_bo->cpu_vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(guc_bo->cpu_vaddr)) {
+			err_ptr = ERR_CAST(guc_bo->cpu_vaddr);
+			goto map_err;
+		}
+	}
+
+	guc_bo->ggtt_offset = guc_ggtt_offset(guc_bo->vma);
+
+	return guc_bo;
+
+map_err:
+	i915_vma_unpin(guc_bo->vma);
+pin_err:
+	i915_vma_close(guc_bo->vma);
+vma_err:
+	i915_gem_object_put(obj);
+obj_err:
+	kfree(guc_bo);
+	return err_ptr;
+}
+
+/**
+ * intel_guc_bo_free() - Free a GuC buffer object.
+ * @guc_bo: the intel_guc_bo to be freed.
+ *
+ * Unpin/unmap the underlying GEM object from CPU/GGTT and release any memory
+ * allocated for this intel_guc_bo.
+ */
+void intel_guc_bo_free(struct intel_guc_bo *guc_bo)
+{
+	if (guc_bo) {
+		GEM_BUG_ON(!guc_bo->vma);
+
+		if (guc_bo->cpu_vaddr)
+			i915_gem_object_unpin_map(guc_bo->vma->obj);
+
+		i915_vma_unpin_and_release(&guc_bo->vma);
+
+		kfree(guc_bo);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_bo.h b/drivers/gpu/drm/i915/intel_guc_bo.h
new file mode 100644
index 0000000..547d201
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_bo.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_BO_H_
+#define _INTEL_GUC_BO_H_
+
+#include "i915_vma.h"
+
+struct intel_guc;
+
+/**
+ * struct intel_guc_bo - GuC accessible buffer object.
+ * @vma: i915_vma structure.
+ * @ggtt_offset: pinned offset in GGTT.
+ * @cpu_vaddr: mapped CPU virtual address, or NULL if not CPU mapped.
+ *
+ * GuC hardware requires all GuC accessible buffers to be pinned above
+ * GUC_WOPCM_TOP in GGTT address space. This structure serves as a wrapper
+ * around a generic i915_vma.
+ */
+struct intel_guc_bo {
+	struct i915_vma *vma;
+	u32 ggtt_offset;
+	void *cpu_vaddr;
+};
+
+struct intel_guc_bo *intel_guc_bo_alloc(struct intel_guc *guc, u32 size,
+					bool cpu_map);
+void intel_guc_bo_free(struct intel_guc_bo *guc_bo);
+
+#endif /* _INTEL_GUC_BO_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 24ad557..d687555 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -107,18 +107,18 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
 
 static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
 {
-	return ctch->vma != NULL;
+	return ctch->ctbs_bo != NULL;
 }
 
 static int ctch_init(struct intel_guc *guc,
 		     struct intel_guc_ct_channel *ctch)
 {
-	struct i915_vma *vma;
+	struct intel_guc_bo *guc_bo;
 	void *blob;
 	int err;
 	int i;
 
-	GEM_BUG_ON(ctch->vma);
+	GEM_BUG_ON(ctch->ctbs_bo);
 
 	/* We allocate 1 page to hold both descriptors and both buffers.
 	 *       ___________.....................
@@ -142,33 +142,26 @@ static int ctch_init(struct intel_guc *guc,
 	 * other code will need updating as well.
 	 */
 
-	/* allocate vma */
-	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
+	guc_bo = intel_guc_bo_alloc(guc, PAGE_SIZE, true);
+	if (IS_ERR(guc_bo)) {
+		err = PTR_ERR(guc_bo);
 		goto err_out;
 	}
-	ctch->vma = vma;
 
-	/* map first page */
-	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(blob)) {
-		err = PTR_ERR(blob);
-		goto err_vma;
-	}
-	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));
+	ctch->ctbs_bo = guc_bo;
+
+	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", ctch->ctbs_bo->ggtt_offset);
 
+	blob = guc_bo->cpu_vaddr;
 	/* store pointers to desc and cmds */
 	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
 		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
-		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
 	}
 
 	return 0;
 
-err_vma:
-	i915_vma_unpin_and_release(&ctch->vma);
 err_out:
 	DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
 			 ctch->owner, err);
@@ -178,10 +171,8 @@ static int ctch_init(struct intel_guc *guc,
 static void ctch_fini(struct intel_guc *guc,
 		      struct intel_guc_ct_channel *ctch)
 {
-	GEM_BUG_ON(!ctch->vma);
-
-	i915_gem_object_unpin_map(ctch->vma->obj);
-	i915_vma_unpin_and_release(&ctch->vma);
+	GEM_BUG_ON(!ctch->ctbs_bo);
+	intel_guc_bo_free(ctch->ctbs_bo);
 }
 
 static int ctch_open(struct intel_guc *guc,
@@ -194,15 +185,15 @@ static int ctch_open(struct intel_guc *guc,
 	DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
 			 ctch->owner, yesno(ctch_is_open(ctch)));
 
-	if (!ctch->vma) {
+	if (!ctch->ctbs_bo) {
 		err = ctch_init(guc, ctch);
 		if (unlikely(err))
 			goto err_out;
-		GEM_BUG_ON(!ctch->vma);
+		GEM_BUG_ON(!ctch->ctbs_bo);
 	}
 
 	/* vma should be already allocated and map'ed */
-	base = guc_ggtt_offset(ctch->vma);
+	base = ctch->ctbs_bo->ggtt_offset;
 
 	/* (re)initialize descriptors
 	 * cmds buffers are in the second half of the blob page
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index 6d97f36..89c9228 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -62,7 +62,7 @@ struct intel_guc_ct_buffer {
  * @next_fence: fence to be used with next send command
  */
 struct intel_guc_ct_channel {
-	struct i915_vma *vma;
+	struct intel_guc_bo *ctbs_bo;
 	struct intel_guc_ct_buffer ctbs[2];
 	u32 owner;
 	u32 next_fence;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 7b5074e..4bc05d9 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -211,7 +211,8 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
 		return;
 
 	/* All data has been written, so now move the offset of sub buffer. */
-	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
+	relay_reserve(guc->log.runtime.relay_chan,
+		      guc->log.bo->vma->obj->base.size);
 
 	/* Switch to the next sub buffer */
 	relay_flush(guc->log.runtime.relay_chan);
@@ -397,12 +398,12 @@ static int guc_log_runtime_create(struct intel_guc *guc)
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (!guc->log.vma)
+	if (!guc->log.bo)
 		return -ENODEV;
 
 	GEM_BUG_ON(guc_log_has_runtime(guc));
 
-	ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
+	ret = i915_gem_object_set_to_wc_domain(guc->log.bo->vma->obj, true);
 	if (ret)
 		return ret;
 
@@ -411,7 +412,7 @@ static int guc_log_runtime_create(struct intel_guc *guc)
 	 * buffer pages, so that we can directly get the data
 	 * (up-to-date) from memory.
 	 */
-	vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map(guc->log.bo->vma->obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
 		return PTR_ERR(vaddr);
@@ -431,7 +432,7 @@ static void guc_log_runtime_destroy(struct intel_guc *guc)
 	if (!guc_log_has_runtime(guc))
 		return;
 
-	i915_gem_object_unpin_map(guc->log.vma->obj);
+	i915_gem_object_unpin_map(guc->log.bo->vma->obj);
 	guc->log.runtime.buf_addr = NULL;
 }
 
@@ -601,12 +602,12 @@ static void guc_flush_logs(struct intel_guc *guc)
 
 int intel_guc_log_create(struct intel_guc *guc)
 {
-	struct i915_vma *vma;
+	struct intel_guc_bo *bo;
 	unsigned long offset;
 	u32 flags;
 	int ret;
 
-	GEM_BUG_ON(guc->log.vma);
+	GEM_BUG_ON(guc->log.bo);
 
 	/*
 	 * We require SSE 4.1 for fast reads from the GuC log buffer and
@@ -618,18 +619,18 @@ int intel_guc_log_create(struct intel_guc *guc)
 		goto err;
 	}
 
-	vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
+	bo = intel_guc_bo_alloc(guc, GUC_LOG_SIZE, false);
+	if (IS_ERR(bo)) {
+		ret = PTR_ERR(bo);
 		goto err;
 	}
 
-	guc->log.vma = vma;
+	guc->log.bo = bo;
 
 	if (i915_modparams.guc_log_level) {
 		ret = guc_log_runtime_create(guc);
 		if (ret < 0)
-			goto err_vma;
+			goto err_bo;
 	}
 
 	/* each allocated unit is a page */
@@ -638,13 +639,13 @@ int intel_guc_log_create(struct intel_guc *guc)
 		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+	offset = bo->ggtt_offset >> PAGE_SHIFT; /* in pages */
 	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 
 	return 0;
 
-err_vma:
-	i915_vma_unpin_and_release(&guc->log.vma);
+err_bo:
+	intel_guc_bo_free(guc->log.bo);
 err:
 	/* logging will be off */
 	i915_modparams.guc_log_level = 0;
@@ -654,7 +655,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 void intel_guc_log_destroy(struct intel_guc *guc)
 {
 	guc_log_runtime_destroy(guc);
-	i915_vma_unpin_and_release(&guc->log.vma);
+	intel_guc_bo_free(guc->log.bo);
 }
 
 int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
@@ -664,7 +665,7 @@ int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
 	u32 verbosity;
 	int ret;
 
-	if (!guc->log.vma)
+	if (!guc->log.bo)
 		return -ENODEV;
 
 	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index dab0e94..291a41c 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -41,7 +41,7 @@ struct intel_guc;
 
 struct intel_guc_log {
 	u32 flags;
-	struct i915_vma *vma;
+	struct intel_guc_bo *bo;
 	/* The runtime stuff gets created only when GuC logging gets enabled */
 	struct {
 		void *buf_addr;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8a8ad2f..809bda6 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -158,7 +158,7 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
 
 static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
 {
-	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+	struct guc_stage_desc *base = client->guc->stage_desc_pool->cpu_vaddr;
 
 	return &base[client->stage_id];
 }
@@ -181,7 +181,7 @@ static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
 
 static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
 {
-	return client->vaddr + client->doorbell_offset;
+	return client->bo->cpu_vaddr + client->doorbell_offset;
 }
 
 static bool has_doorbell(struct intel_guc_client *client)
@@ -274,7 +274,7 @@ static unsigned long __select_cacheline(struct intel_guc *guc)
 static inline struct guc_process_desc *
 __get_process_desc(struct intel_guc_client *client)
 {
-	return client->vaddr + client->proc_desc_offset;
+	return client->bo->cpu_vaddr + client->proc_desc_offset;
 }
 
 /*
@@ -304,23 +304,16 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 
 static int guc_stage_desc_pool_create(struct intel_guc *guc)
 {
-	struct i915_vma *vma;
-	void *vaddr;
-
-	vma = intel_guc_allocate_vma(guc,
-				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
-				     GUC_MAX_STAGE_DESCRIPTORS));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		i915_vma_unpin_and_release(&vma);
-		return PTR_ERR(vaddr);
-	}
+	struct intel_guc_bo *guc_bo;
+
+	guc_bo = intel_guc_bo_alloc(guc,
+				    PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+				    GUC_MAX_STAGE_DESCRIPTORS),
+				    true);
+	if (IS_ERR(guc_bo))
+		return PTR_ERR(guc_bo);
 
-	guc->stage_desc_pool = vma;
-	guc->stage_desc_pool_vaddr = vaddr;
+	guc->stage_desc_pool = guc_bo;
 	ida_init(&guc->stage_ids);
 
 	return 0;
@@ -329,8 +322,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
 static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
 {
 	ida_destroy(&guc->stage_ids);
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
+	intel_guc_bo_free(guc->stage_desc_pool);
 }
 
 /*
@@ -411,8 +403,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = guc_ggtt_offset(client->vma);
-	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
+	gfx_addr = client->bo->ggtt_offset;
+	desc->db_trigger_phy = sg_dma_address(client->bo->vma->pages->sgl) +
 				client->doorbell_offset;
 	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
 	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
@@ -462,7 +454,7 @@ static void guc_wq_item_append(struct intel_guc_client *client,
 	GEM_BUG_ON(wq_off & (wqi_size - 1));
 
 	/* WQ starts from the page after doorbell / process_desc */
-	wqi = client->vaddr + wq_off + GUC_DB_SIZE;
+	wqi = client->bo->cpu_vaddr + wq_off + GUC_DB_SIZE;
 
 	/* Now fill in the 4-word work queue item */
 	wqi->header = WQ_TYPE_INORDER |
@@ -584,7 +576,7 @@ static void inject_preempt_context(struct work_struct *work)
 	data[3] = engine->guc_id;
 	data[4] = guc->execbuf_client->priority;
 	data[5] = guc->execbuf_client->stage_id;
-	data[6] = guc_ggtt_offset(guc->shared_data);
+	data[6] = guc->shared_data->ggtt_offset;
 
 	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
 		execlists_clear_active(&engine->execlists,
@@ -605,7 +597,7 @@ static void inject_preempt_context(struct work_struct *work)
 static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
 {
 	struct intel_guc *guc = &engine->i915->guc;
-	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
+	struct guc_shared_ctx_data *data = guc->shared_data->cpu_vaddr;
 	struct guc_ctx_report *report =
 		&data->preempt_ctx_report[engine->guc_id];
 
@@ -870,8 +862,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 {
 	struct intel_guc_client *client;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct i915_vma *vma;
-	void *vaddr;
+	struct intel_guc_bo *guc_bo;
 	int ret;
 
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -893,21 +884,14 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	client->stage_id = ret;
 
 	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
-	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
+	guc_bo = intel_guc_bo_alloc(guc, GUC_DB_SIZE + GUC_WQ_SIZE, true);
+	if (IS_ERR(guc_bo)) {
+		ret = PTR_ERR(guc_bo);
 		goto err_id;
 	}
 
 	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
-	client->vma = vma;
-
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_vma;
-	}
-	client->vaddr = vaddr;
+	client->bo = guc_bo;
 
 	client->doorbell_offset = __select_cacheline(guc);
 
@@ -926,7 +910,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 
 	ret = reserve_doorbell(client);
 	if (ret)
-		goto err_vaddr;
+		goto err_bo;
 
 	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
 			 priority, client, client->engines, client->stage_id);
@@ -935,10 +919,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 
 	return client;
 
-err_vaddr:
-	i915_gem_object_unpin_map(client->vma->obj);
-err_vma:
-	i915_vma_unpin_and_release(&client->vma);
+err_bo:
+	intel_guc_bo_free(client->bo);
 err_id:
 	ida_simple_remove(&guc->stage_ids, client->stage_id);
 err_client:
@@ -950,8 +932,7 @@ static void guc_client_free(struct intel_guc_client *client)
 {
 	unreserve_doorbell(client);
 	guc_stage_desc_fini(client->guc, client);
-	i915_gem_object_unpin_map(client->vma->obj);
-	i915_vma_unpin_and_release(&client->vma);
+	intel_guc_bo_free(client->bo);
 	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
 	kfree(client);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index fb081ce..f7795d0 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -53,8 +53,7 @@ struct drm_i915_private;
  * descriptor. Work queue pages are mapped momentarily as required.
  */
 struct intel_guc_client {
-	struct i915_vma *vma;
-	void *vaddr;
+	struct intel_guc_bo *bo;
 	struct i915_gem_context *owner;
 	struct intel_guc *guc;
 
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 8e25474..10cbc7a 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -204,11 +204,11 @@ void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
 
 static void guc_capture_load_err_log(struct intel_guc *guc)
 {
-	if (!guc->log.vma || !i915_modparams.guc_log_level)
+	if (!guc->log.bo || !i915_modparams.guc_log_level)
 		return;
 
 	if (!guc->load_err_log)
-		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+		guc->load_err_log = i915_gem_object_get(guc->log.bo->vma->obj);
 
 	return;
 }
-- 
2.7.4


