[PATCH 19/23] drm/amdgpu/mxgpu: add implementation of GPU virtualization of VI

Xiangliang Yu Xiangliang.Yu at amd.com
Sat Dec 17 16:16:41 UTC 2016


Different chips will have different virtual behaviors, so we need to
implement different virtualization features according to the hardware design.

This patch implements VI family virtualization: it wires up the
CSA and mailbox interfaces and allocates a writeback offset for KIQ.

Signed-off-by: Xiangliang Yu <Xiangliang.Yu at amd.com>
---
 drivers/gpu/drm/amd/mxgpu/amd_mxgpu.h |   2 +
 drivers/gpu/drm/amd/mxgpu/mxgpu_vi.c  | 205 ++++++++++++++++++++++++++++++++++
 2 files changed, 207 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/mxgpu/mxgpu_vi.c

diff --git a/drivers/gpu/drm/amd/mxgpu/amd_mxgpu.h b/drivers/gpu/drm/amd/mxgpu/amd_mxgpu.h
index 54e7b31..02a8839 100644
--- a/drivers/gpu/drm/amd/mxgpu/amd_mxgpu.h
+++ b/drivers/gpu/drm/amd/mxgpu/amd_mxgpu.h
@@ -88,4 +88,6 @@ extern void xgpu_destroy_csa(struct amd_xgpu_csa *csa);
 
 extern int xgpu_request_full_gpu_access(struct amdgpu_device *adev, bool init);
 extern int xgpu_release_full_gpu_access(struct amdgpu_device *adev, bool init);
+
+extern void xgpu_vi_add_ip_blocks(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/mxgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/mxgpu/mxgpu_vi.c
new file mode 100644
index 0000000..e5d517f
--- /dev/null
+++ b/drivers/gpu/drm/amd/mxgpu/mxgpu_vi.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Xiangliang.Yu at amd.com
+ */
+#include "amd_mxgpu.h"
+#include "vi.h"
+#include "gmc_v8_0.h"
+#include "gfx_v8_0.h"
+#include "sdma_v3_0.h"
+#include "tonga_ih.h"
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+
+/*
+ * early_init: allocate the per-VF bookkeeping structure and ask the
+ * hypervisor for full (exclusive) GPU access for the init sequence.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int xgpu_vi_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	/* Allocates the amd_xgpu state and stores it in adev->priv_data. */
+	r = amd_xgpu_alloc(adev);
+	if (r)
+		return r;
+
+	r = xgpu_request_full_gpu_access(adev, true);
+	if (r) {
+		DRM_ERROR("failed to send message to HV (%d).\n", r);
+		/* Don't leak the state allocated above on failure. */
+		amd_xgpu_free((struct amd_xgpu *)adev->priv_data);
+		adev->priv_data = NULL;
+		return r;
+	}
+
+	return 0;
+}
+
+/* late_init: no late-init work is needed for the VI virtualization block. */
+static int xgpu_vi_late_init(void *handle)
+{
+	return 0;
+}
+
+/*
+ * sw_init: set up the CSA (context save area) for this virtual function.
+ * The per-VF state was placed in adev->priv_data by early_init.
+ */
+static int xgpu_vi_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amd_xgpu *vf = (struct amd_xgpu *)adev->priv_data;
+
+	return xgpu_allocate_csa(vf);
+}
+
+/* sw_fini: tear down the CSA created in sw_init. Always succeeds. */
+static int xgpu_vi_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amd_xgpu *vf = (struct amd_xgpu *)adev->priv_data;
+
+	xgpu_destroy_csa(&vf->sa);
+
+	return 0;
+}
+
+/*
+ * hw_init: reserve a writeback slot for KIQ register-read results.
+ * Returns 0 on success or the amdgpu_wb_get() error code.
+ */
+static int xgpu_vi_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amd_xgpu *vf = (struct amd_xgpu *)adev->priv_data;
+	int r;
+
+	r = amdgpu_wb_get(adev, &vf->reg_val_offs);
+	if (r)
+		DRM_ERROR("failed to alloc reg val offs(%d).\n", r);
+
+	return r;
+}
+
+/* hw_fini: return the writeback slot reserved in hw_init. */
+static int xgpu_vi_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amd_xgpu *xgpu = (struct amd_xgpu *)adev->priv_data;
+
+	amdgpu_wb_free(adev, xgpu->reg_val_offs);
+	return 0;
+}
+
+/*
+ * late_fini: hand the GPU back to the hypervisor and free the per-VF state.
+ *
+ * The release message is sent *before* tearing down adev->priv_data:
+ * xgpu_release_full_gpu_access() presumably goes through the mailbox, whose
+ * state lives in the amd_xgpu structure — freeing it first would risk a
+ * use-after-free (TODO confirm against the mailbox implementation).
+ */
+static void xgpu_vi_late_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amd_xgpu *xgpu = (struct amd_xgpu *)adev->priv_data;
+
+	xgpu_release_full_gpu_access(adev, false);
+
+	amd_xgpu_free(xgpu);
+	adev->priv_data = NULL;
+}
+
+/* suspend: nothing to save for the virtualization block. */
+static int xgpu_vi_suspend(void *handle)
+{
+	return 0;
+}
+
+/* resume: nothing to restore for the virtualization block. */
+static int xgpu_vi_resume(void *handle)
+{
+	return 0;
+}
+
+/* is_idle: the block reports busy unconditionally; idleness is not tracked. */
+static bool xgpu_vi_is_idle(void *handle)
+{
+	return false;
+}
+
+/* wait_for_idle: no waiting is required; always succeeds. */
+static int xgpu_vi_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+/* check_soft_reset: this block never requests a soft reset. */
+static bool xgpu_vi_check_soft_reset(void *handle)
+{
+	return false;
+}
+
+/* pre_soft_reset: no preparation needed before a soft reset. */
+static int xgpu_vi_pre_soft_reset(void *handle)
+{
+	return 0;
+}
+
+/* soft_reset: soft reset is a no-op for the virtualization block. */
+static int xgpu_vi_soft_reset(void *handle)
+{
+	return 0;
+}
+
+/* post_soft_reset: no cleanup needed after a soft reset. */
+static int xgpu_vi_post_soft_reset(void *handle)
+{
+	return 0;
+}
+
+/* set_clockgating_state: clock gating is not controlled by the VF; no-op. */
+static int xgpu_vi_set_clockgating_state(void *handle,
+					 enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+/* IP-block callback table for the VI virtualization block. */
+const struct amd_ip_funcs xgpu_vi_ip_funcs = {
+	.name = "xgpu-vi",
+	.early_init = xgpu_vi_early_init,
+	.late_init = xgpu_vi_late_init,
+	.sw_init = xgpu_vi_sw_init,
+	.sw_fini = xgpu_vi_sw_fini,
+	.hw_init = xgpu_vi_hw_init,
+	.hw_fini = xgpu_vi_hw_fini,
+	.late_fini = xgpu_vi_late_fini,
+	.suspend = xgpu_vi_suspend,
+	.resume = xgpu_vi_resume,
+	.is_idle = xgpu_vi_is_idle,
+	.wait_for_idle = xgpu_vi_wait_for_idle,
+	.check_soft_reset = xgpu_vi_check_soft_reset,
+	.pre_soft_reset = xgpu_vi_pre_soft_reset,
+	.soft_reset = xgpu_vi_soft_reset,
+	.post_soft_reset = xgpu_vi_post_soft_reset,
+	.set_clockgating_state = xgpu_vi_set_clockgating_state,
+};
+
+/* Version descriptor (v1.0.0) registering the XGPU callbacks above. */
+static const struct amdgpu_ip_block_version xgpu_vi_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_XGPU,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &xgpu_vi_ip_funcs,
+};
+
+/*
+ * Register the IP blocks used when running VI as a virtual function.
+ * The registration order determines init order.
+ *
+ * NOTE(review): the XGPU block is deliberately placed after GFX and
+ * before SDMA — presumably so its hw_init (KIQ writeback slot) runs
+ * between them; confirm against the KIQ bring-up sequence.
+ */
+void xgpu_vi_add_ip_blocks(struct amdgpu_device *adev)
+{
+	amdgpu_ip_block_add(adev, &vi_common_ip_block);
+	amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+	amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+	amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+	amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+	amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+	amdgpu_ip_block_add(adev, &xgpu_vi_ip_block);
+	amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+}
-- 
2.7.4



More information about the amd-gfx mailing list