[Nouveau] [PATCH v4 25/33] pmu: support for GM20X

Alexandre Courbot acourbot at nvidia.com
Mon Nov 21 08:29:22 UTC 2016


From: Deepak Goyal <dgoyal at nvidia.com>

Add support for NVIDIA-signed PMU firmware for the GM20X family of
chips.

This includes handling of the command and message queues, core
interfaces for secure boot to signal which PMU firmware version is in
use and to generate the matching boot command line, and a new interface
function to boot a given falcon using the PMU's ACR unit.
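
For reference, here is a rough sketch of how a secure boot caller is
expected to drive the new interfaces; the local variable names
(cmdline, falcon_id, ret) are illustrative only, the actual integration
lives in the secboot patches of this series:

    u32 size = nvkm_pmu_cmdline_size(pmu);

    if (size) {
            void *cmdline = kzalloc(size, GFP_KERNEL);

            if (!cmdline)
                    return -ENOMEM;
            /* fill the PMU boot arguments for this firmware version */
            nvkm_pmu_write_cmdline(pmu, cmdline);
            /* ... copy cmdline into the PMU falcon's DMEM, then start it ... */
            kfree(cmdline);
    }

    /* once the PMU firmware runs, ask its ACR unit to boot another falcon */
    ret = nvkm_pmu_acr_boot_falcon(pmu, falcon_id /* enum nvkm_falconidx */);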

Signed-off-by: Deepak Goyal <dgoyal at nvidia.com>
[acourbot at nvidia.com: reorganize code]
Signed-off-by: Alexandre Courbot <acourbot at nvidia.com>
---
 drm/nouveau/include/nvkm/subdev/pmu.h |  11 +-
 drm/nouveau/nvkm/subdev/pmu/Kbuild    |   1 +-
 drm/nouveau/nvkm/subdev/pmu/base.c    |  47 ++-
 drm/nouveau/nvkm/subdev/pmu/gm200.c   | 713 +++++++++++++++++++++++++++-
 drm/nouveau/nvkm/subdev/pmu/gm200.h   | 104 ++++-
 drm/nouveau/nvkm/subdev/pmu/nv_pmu.h  |  50 ++-
 drm/nouveau/nvkm/subdev/pmu/priv.h    |  18 +-
 7 files changed, 944 insertions(+), 0 deletions(-)
 create mode 100644 drm/nouveau/nvkm/subdev/pmu/gm200.c
 create mode 100644 drm/nouveau/nvkm/subdev/pmu/gm200.h
 create mode 100644 drm/nouveau/nvkm/subdev/pmu/nv_pmu.h

diff --git a/drm/nouveau/include/nvkm/subdev/pmu.h b/drm/nouveau/include/nvkm/subdev/pmu.h
index f37538eb1fe5..151003e0500e 100644
--- a/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,9 +1,11 @@
 #ifndef __NVKM_PMU_H__
 #define __NVKM_PMU_H__
 #include <core/subdev.h>
+#include <core/falcon.h>
 
 struct nvkm_pmu {
 	const struct nvkm_pmu_func *func;
+	const struct nv_pmu_func *nv_func;
 	struct nvkm_subdev subdev;
 
 	struct {
@@ -27,6 +29,14 @@ int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
 		  u32 message, u32 data0, u32 data1);
 void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
 
+/* useful if we run a NVIDIA-signed firmware */
+int nvkm_pmu_set_version(struct nvkm_pmu *, u32);
+u32 nvkm_pmu_cmdline_size(struct nvkm_pmu *);
+void nvkm_pmu_write_cmdline(struct nvkm_pmu *, void *);
+
+/* interface to ACR unit running on PMU (NVIDIA signed firmware) */
+int nvkm_pmu_acr_boot_falcon(struct nvkm_pmu *, enum nvkm_falconidx);
+
 int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
@@ -35,6 +45,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
diff --git a/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 51fb4bf94a44..141f3ee6ffe4 100644
--- a/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
 nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gm200.o
 nvkm-y += nvkm/subdev/pmu/gp100.o
 nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drm/nouveau/nvkm/subdev/pmu/base.c b/drm/nouveau/nvkm/subdev/pmu/base.c
index 5548258a4510..20bd5585df15 100644
--- a/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -120,6 +120,53 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 	return nvkm_pmu(subdev);
 }
 
+u32
+nvkm_pmu_cmdline_size(struct nvkm_pmu *pmu)
+{
+	if (!pmu || !pmu->nv_func || !pmu->nv_func->init)
+		return 0;
+
+	return pmu->nv_func->init->cmdline_size;
+}
+
+void
+nvkm_pmu_write_cmdline(struct nvkm_pmu *pmu, void *buf)
+{
+	if (!pmu || !pmu->nv_func || !pmu->nv_func->init)
+		return;
+
+	pmu->nv_func->init->gen_cmdline(pmu, buf);
+}
+
+int
+nvkm_pmu_acr_boot_falcon(struct nvkm_pmu *pmu, enum nvkm_falconidx falcon)
+{
+	if (!pmu || !pmu->nv_func || !pmu->nv_func->acr ||
+	    !pmu->nv_func->acr->boot_falcon)
+		return -ENODEV;
+
+	return pmu->nv_func->acr->boot_falcon(pmu, falcon);
+}
+
+int
+nvkm_pmu_set_version(struct nvkm_pmu *pmu, u32 version)
+{
+	struct nvkm_subdev *subdev;
+
+	if (!pmu)
+		return -ENODEV;
+
+	subdev = &pmu->subdev;
+
+	switch (version) {
+	default:
+		nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
+			   version);
+		return -EINVAL;
+	}
+
+	nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
+
+	return 0;
+}
+
 static const struct nvkm_subdev_func
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
diff --git a/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drm/nouveau/nvkm/subdev/pmu/gm200.c
new file mode 100644
index 000000000000..11bf24f35d21
--- /dev/null
+++ b/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "gm200.h"
+#include <core/falcon.h>
+
+/* Max size of the messages we can receive */
+#define PMU_MSG_BUF_SIZE 128
+
+#define PMU_UNIT_ID_IS_VALID(id)	\
+	(((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START))
+
+#define PMU_CMD_FLAGS_STATUS		BIT(0)
+#define PMU_CMD_FLAGS_INTR		BIT(1)
+
+#define PMU_IS_COMMAND_QUEUE(id)	\
+	((id)  < PMU_MESSAGE_QUEUE)
+
+#define PMU_IS_SW_COMMAND_QUEUE(id)	\
+	(((id) == PMU_COMMAND_QUEUE_HPQ) || ((id) == PMU_COMMAND_QUEUE_LPQ))
+
+#define  PMU_IS_MESSAGE_QUEUE(id)	\
+	((id) == PMU_MESSAGE_QUEUE)
+
+#define QUEUE_ALIGNMENT		4
+
+#define PMU_CMD_HDR_SIZE	sizeof(struct pmu_hdr)
+#define PMU_MSG_HDR_SIZE	sizeof(struct pmu_hdr)
+
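+/*
+ * Helpers to copy data to/from the PMU falcon's DMEM (falcon base 0x10a000).
+ * copy_lock serializes these copies.
+ */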
+static void
+pmu_copy_to_dmem(struct gm200_pmu *priv, u32 dst, void *src, u32 size)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	mutex_lock(&priv->copy_lock);
+
+	nvkm_falcon_load_dmem(device, 0x10a000, src, dst, size);
+
+	mutex_unlock(&priv->copy_lock);
+}
+
+static void
+pmu_copy_from_dmem(struct gm200_pmu *priv, u32 src, void *dst, u32 size)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	mutex_lock(&priv->copy_lock);
+
+	nvkm_falcon_read_dmem(device, 0x10a000, src, size, dst);
+
+	mutex_unlock(&priv->copy_lock);
+}
+
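+/*
+ * Sequences track in-flight commands: each command carries a sequence id
+ * that the PMU echoes back in its reply, letting us match the reply to the
+ * right callback/completion in pmu_msg_handle().
+ */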
+static int
+pmu_seq_acquire(struct gm200_pmu *priv, struct gm200_pmu_sequence **pseq)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	struct gm200_pmu_sequence *seq;
+	u32 index;
+
+	mutex_lock(&priv->seq_lock);
+	index = find_first_zero_bit(priv->seq_tbl, GM200_PMU_NUM_SEQUENCES);
+
+	if (index >= GM200_PMU_NUM_SEQUENCES) {
+		nvkm_error(subdev, "no free sequence available\n");
+		mutex_unlock(&priv->seq_lock);
+		return -EAGAIN;
+	}
+
+	set_bit(index, priv->seq_tbl);
+	mutex_unlock(&priv->seq_lock);
+	seq = &priv->seq[index];
+	seq->state = SEQ_STATE_PENDING;
+	*pseq = seq;
+
+	return 0;
+}
+
+static void
+pmu_seq_release(struct gm200_pmu *pmu, struct gm200_pmu_sequence *seq)
+{
+	seq->state = SEQ_STATE_FREE;
+	seq->callback = NULL;
+	seq->msg = NULL;
+	seq->completion = NULL;
+	clear_bit(seq->id, pmu->seq_tbl);
+}
+
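+/*
+ * Queue head/tail pointers are kept in PMU registers: one head/tail pair per
+ * command queue (0x10a4a0/0x10a4b0 + 4 * index), and a single pair for the
+ * message queue (0x10a4c8/0x10a4cc).
+ */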
+static int
+pmu_queue_head_get(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		   u32 *head)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id))
+		*head = nvkm_rd32(device, 0x0010a4a0 + (queue->index * 4));
+	else
+		*head = nvkm_rd32(device, 0x0010a4c8);
+
+	return 0;
+}
+
+static int
+pmu_queue_head_set(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		   u32 head)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id))
+		nvkm_wr32(device, 0x0010a4a0 + (queue->index * 4), head);
+	else
+		nvkm_wr32(device, 0x0010a4c8, head);
+
+	return 0;
+}
+
+static int
+pmu_queue_tail_get(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		   u32 *tail)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id))
+		*tail = nvkm_rd32(device, 0x0010a4b0 + (queue->index * 4));
+	else
+		*tail = nvkm_rd32(device, 0x0010a4cc);
+
+	return 0;
+}
+
+static int
+pmu_queue_tail_set(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		   u32 tail)
+{
+	struct nvkm_device *device = priv->base.subdev.device;
+
+	if (PMU_IS_COMMAND_QUEUE(queue->id))
+		nvkm_wr32(device, 0x0010a4b0 + (queue->index * 4), tail);
+	else
+		nvkm_wr32(device, 0x0010a4cc, tail);
+
+	return 0;
+}
+
+static int
+pmu_queue_lock(struct gm200_pmu_queue *queue)
+{
+	if (PMU_IS_MESSAGE_QUEUE(queue->id))
+		return 0;
+
+	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
+		mutex_lock(&queue->mutex);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+pmu_queue_unlock(struct gm200_pmu_queue *queue)
+{
+	if (PMU_IS_MESSAGE_QUEUE(queue->id))
+		return 0;
+
+	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
+		mutex_unlock(&queue->mutex);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* called by pmu_read_message, no lock */
+static bool
+pmu_queue_is_empty(struct gm200_pmu *priv, struct gm200_pmu_queue *queue)
+{
+	u32 head, tail;
+
+	pmu_queue_head_get(priv, queue, &head);
+
+	if (queue->oflag == OFLAG_READ)
+		tail = queue->position;
+	else
+		pmu_queue_tail_get(priv, queue, &tail);
+
+	return head == tail;
+}
+
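+/*
+ * Check whether a write of 'size' bytes fits into the queue. Queues are ring
+ * buffers in DMEM: if there is not enough room between the head and the end
+ * of the buffer, the write must restart from the beginning of the queue, in
+ * which case *need_rewind is set so the caller emits a REWIND command first.
+ */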
+static bool
+pmu_queue_has_room(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+			u32 size, bool *need_rewind)
+{
+	u32 head, tail, free;
+	bool rewind = false;
+
+	size = ALIGN(size, QUEUE_ALIGNMENT);
+
+	pmu_queue_head_get(priv, queue, &head);
+	pmu_queue_tail_get(priv, queue, &tail);
+
+	if (head >= tail) {
+		free = queue->offset + queue->size - head;
+		free -= PMU_CMD_HDR_SIZE;
+
+		if (size > free) {
+			rewind = true;
+			head = queue->offset;
+		}
+	}
+
+	if (head < tail)
+		free = tail - head - 1;
+
+	if (need_rewind)
+		*need_rewind = rewind;
+
+	return size <= free;
+}
+
+static int
+pmu_queue_push(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+	       void *data, u32 size)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+
+	if (queue->oflag != OFLAG_WRITE) {
+		nvkm_error(subdev, "queue not opened for write\n");
+		return -EINVAL;
+	}
+
+	pmu_copy_to_dmem(priv, queue->position, data, size);
+	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+
+	return 0;
+}
+
+static int
+pmu_queue_pop(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+			void *data, u32 size, u32 *bytes_read)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	u32 head, tail, used;
+
+	*bytes_read = 0;
+
+	if (queue->oflag != OFLAG_READ) {
+		nvkm_error(subdev, "queue not opened for read\n");
+		return -EINVAL;
+	}
+
+	pmu_queue_head_get(priv, queue, &head);
+	if (head < queue->position)
+		queue->position = queue->offset;
+	tail = queue->position;
+
+	if (head == tail) {
+		*bytes_read = 0;
+		return 0;
+	}
+	used = head - tail;
+
+	if (size > used) {
+		nvkm_warn(subdev, "queue size smaller than read request\n");
+		size = used;
+	}
+
+	pmu_copy_from_dmem(priv, tail, data, size);
+	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+	*bytes_read = size;
+
+	return 0;
+}
+
+static void
+pmu_queue_rewind(struct gm200_pmu *priv, struct gm200_pmu_queue *queue)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	struct pmu_hdr cmd;
+	int err;
+
+	if (queue->oflag == OFLAG_CLOSED) {
+		nvkm_error(subdev, "queue not opened\n");
+		return;
+	}
+
+	if (queue->oflag == OFLAG_WRITE) {
+		cmd.unit_id = PMU_UNIT_REWIND;
+		cmd.size = sizeof(cmd);
+		err = pmu_queue_push(priv, queue, &cmd, cmd.size);
+		if (err)
+			nvkm_error(subdev, "pmu_queue_push failed\n");
+
+		nvkm_debug(subdev, "queue %d rewound\n", queue->id);
+	}
+
+	queue->position = queue->offset;
+}
+
+/* Open for read and lock the queue */
+static int
+pmu_queue_open_read(struct gm200_pmu *priv, struct gm200_pmu_queue *queue)
+{
+	int err;
+
+	err = pmu_queue_lock(queue);
+	if (err)
+		return err;
+
+	if (WARN_ON(queue->oflag != OFLAG_CLOSED)) {
+		pmu_queue_unlock(queue);
+		return -EBUSY;
+	}
+
+	pmu_queue_tail_get(priv, queue, &queue->position);
+	queue->oflag = OFLAG_READ;
+
+	return 0;
+}
+
+/*
+ * Open the queue for write and lock it, making sure there is enough free
+ * space for the write.
+ */
+static int
+pmu_queue_open_write(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		     u32 size)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	bool rewind = false;
+	int err;
+
+	err = pmu_queue_lock(queue);
+	if (err)
+		return err;
+
+	if (WARN_ON(queue->oflag != OFLAG_CLOSED)) {
+		pmu_queue_unlock(queue);
+		return -EBUSY;
+	}
+
+	if (!pmu_queue_has_room(priv, queue, size, &rewind)) {
+		nvkm_error(subdev, "queue full\n");
+		pmu_queue_unlock(queue);
+		return -EAGAIN;
+	}
+
+	pmu_queue_head_get(priv, queue, &queue->position);
+	queue->oflag = OFLAG_WRITE;
+
+	if (rewind)
+		pmu_queue_rewind(priv, queue);
+
+	return 0;
+}
+
+static int
+pmu_queue_close(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+		bool commit)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+
+	if (WARN_ON(queue->oflag == OFLAG_CLOSED)) {
+		nvkm_warn(subdev, "queue already closed\n");
+		return 0;
+	}
+
+	if (commit) {
+		if (queue->oflag == OFLAG_READ)
+			pmu_queue_tail_set(priv, queue, queue->position);
+		else
+			pmu_queue_head_set(priv, queue, queue->position);
+	}
+
+	queue->oflag = OFLAG_CLOSED;
+	pmu_queue_unlock(queue);
+
+	return 0;
+}
+
+static bool
+pmu_check_cmd_params(struct gm200_pmu *priv, struct pmu_hdr *cmd,
+		     struct pmu_hdr *msg, u32 queue_id)
+{
+	struct gm200_pmu_queue *queue;
+
+	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
+		return false;
+
+	queue = &priv->queue[queue_id];
+	if (cmd->size < PMU_CMD_HDR_SIZE)
+		return false;
+
+	if (cmd->size > (queue->size / 2))
+		return false;
+
+	if (msg != NULL && msg->size < PMU_MSG_HDR_SIZE)
+		return false;
+
+	if (!PMU_UNIT_ID_IS_VALID(cmd->unit_id))
+		return false;
+
+	return true;
+}
+
+static int
+pmu_cmd_write(struct gm200_pmu *priv, struct pmu_hdr *cmd, u32 queue_id)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	static unsigned long timeout = ~0;
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	int err = -EAGAIN;
+	int ret = 0;
+	bool commit = true;
+	struct gm200_pmu_queue *queue;
+
+	queue = &priv->queue[queue_id];
+
+	while (err == -EAGAIN && time_before(jiffies, end_jiffies))
+		err = pmu_queue_open_write(priv, queue, cmd->size);
+	if (err) {
+		nvkm_error(subdev, "pmu_queue_open_write failed\n");
+		return err;
+	}
+
+	err = pmu_queue_push(priv, queue, cmd, cmd->size);
+	if (err) {
+		nvkm_error(subdev, "pmu_queue_push failed\n");
+		ret = err;
+		commit = false;
+	}
+
+	err = pmu_queue_close(priv, queue, commit);
+	if (err) {
+		nvkm_error(subdev, "fail to close queue-id %d\n", queue_id);
+		ret = err;
+	}
+
+	return ret;
+}
+
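+/*
+ * Post a command to one of the PMU command queues. A sequence is acquired to
+ * track the command; the PMU's reply, carrying the same seq_id, triggers
+ * 'callback' and completes 'completion' from the message handler.
+ */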
+int
+nv_pmu_cmd_post(struct nvkm_pmu *pmu, struct pmu_hdr *cmd, struct pmu_hdr *msg,
+	     enum nv_pmu_queue queue_id, nv_pmu_callback callback,
+	     struct completion *completion)
+{
+	struct gm200_pmu *priv = gm200_pmu(pmu);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct gm200_pmu_sequence *seq;
+	int err;
+
+	if (WARN_ON(!priv->ready))
+		return -EINVAL;
+
+	if (!pmu_check_cmd_params(priv, cmd, msg, queue_id)) {
+		nvkm_error(subdev,
+			   "invalid pmu cmd: queue_id=%d, cmd_size=%d, cmd_unit_id=%d, msg=%p\n",
+			   queue_id, cmd->size, cmd->unit_id, msg);
+		return -EINVAL;
+	}
+
+	err = pmu_seq_acquire(priv, &seq);
+	if (err)
+		return err;
+
+	cmd->seq_id = seq->id;
+	cmd->ctrl_flags = PMU_CMD_FLAGS_STATUS | PMU_CMD_FLAGS_INTR;
+
+	seq->callback = callback;
+	seq->msg = msg;
+	seq->state = SEQ_STATE_USED;
+	seq->completion = completion;
+
+	err = pmu_cmd_write(priv, cmd, queue_id);
+	if (err) {
+		seq->state = SEQ_STATE_PENDING;
+		pmu_seq_release(priv, seq);
+	}
+
+	return err;
+}
+
+static bool
+pmu_msg_read(struct gm200_pmu *priv, struct gm200_pmu_queue *queue,
+	     struct pmu_hdr *hdr)
+{
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	bool commit = true;
+	int status = 0;
+	u32 read_size, bytes_read;
+	int err;
+
+	if (pmu_queue_is_empty(priv, queue))
+		return false;
+
+	err = pmu_queue_open_read(priv, queue);
+	if (err) {
+		nvkm_error(subdev, "fail to open queue %d\n", queue->id);
+		status |= err;
+		return false;
+	}
+
+	err = pmu_queue_pop(priv, queue, hdr, PMU_MSG_HDR_SIZE, &bytes_read);
+	if (err || (bytes_read != PMU_MSG_HDR_SIZE)) {
+		nvkm_error(subdev, "fail to read from queue %d\n", queue->id);
+		status |= -EINVAL;
+		commit = false;
+		goto close;
+	}
+
+	if (!PMU_UNIT_ID_IS_VALID(hdr->unit_id)) {
+		nvkm_error(subdev, "invalid unit_id %d\n", hdr->unit_id);
+		status |= -EINVAL;
+		commit = false;
+		goto close;
+	}
+
+	if (hdr->size > PMU_MSG_BUF_SIZE) {
+		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+		status |= -ENOSPC;
+		commit = false;
+		goto close;
+	}
+
+	if (hdr->size > PMU_MSG_HDR_SIZE) {
+		read_size = hdr->size - PMU_MSG_HDR_SIZE;
+		err = pmu_queue_pop(priv, queue, (hdr + 1), read_size,
+				    &bytes_read);
+		if (err || (bytes_read != read_size)) {
+			nvkm_error(subdev, "fail to read from queue\n");
+			status |= err;
+			commit = false;
+			goto close;
+		}
+	}
+
+close:
+	err = pmu_queue_close(priv, queue, commit);
+	if (err) {
+		nvkm_error(subdev, "fail to close queue %d\n", queue->id);
+		status |= err;
+	}
+
+	if (status)
+		return false;
+
+	return true;
+}
+
+static int
+pmu_msg_handle(struct gm200_pmu *priv, struct pmu_hdr *hdr)
+{
+	struct nvkm_pmu *pmu = &priv->base;
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	struct gm200_pmu_sequence *seq;
+
+	seq = &priv->seq[hdr->seq_id];
+	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+		nvkm_error(subdev, "msg for an unknown sequence %d\n", seq->id);
+		return -EINVAL;
+	}
+
+	if (seq->state == SEQ_STATE_USED) {
+		if (seq->callback)
+			seq->callback(pmu, hdr);
+	}
+
+	if (seq->completion)
+		complete(seq->completion);
+
+	pmu_seq_release(priv, seq);
+
+	return 0;
+}
+
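+/*
+ * Handle the INIT message the PMU firmware sends after booting. Its payload
+ * (decoded by the firmware-version-specific init_callback) describes where
+ * each command/message queue lives in DMEM; only once it has been processed
+ * can the regular queue-based communication be used, hence the 'ready' flag.
+ */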
+static int
+gm200_pmu_handle_init_msg(struct nvkm_pmu *pmu, struct pmu_hdr *hdr)
+{
+	struct gm200_pmu *priv = gm200_pmu(pmu);
+	struct nvkm_subdev *subdev = &priv->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 tail;
+	int ret, i;
+
+	/*
+	 * Read the message - queues are not initialized yet so we cannot rely
+	 * on pmu_msg_read
+	 */
+	tail = nvkm_rd32(device, 0x0010a4cc);
+	pmu_copy_from_dmem(priv, tail, hdr, PMU_MSG_HDR_SIZE);
+
+	if (hdr->unit_id != PMU_UNIT_INIT) {
+		nvkm_error(subdev, "expected INIT message from PMU\n");
+		return -EINVAL;
+	}
+
+	if (hdr->size > PMU_MSG_BUF_SIZE) {
+		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+		return -ENOSPC;
+	}
+
+	pmu_copy_from_dmem(priv, tail + PMU_MSG_HDR_SIZE, (hdr + 1),
+			   hdr->size - PMU_MSG_HDR_SIZE);
+
+	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
+	nvkm_wr32(device, 0x0010a4cc, tail);
+
+	ret = pmu->nv_func->init->init_callback(pmu, hdr);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < GM200_PMU_QUEUE_COUNT; i++) {
+		struct gm200_pmu_queue *queue = &priv->queue[i];
+
+		nvkm_debug(subdev,
+			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
+			   i, queue->index, queue->offset, queue->size);
+	}
+
+	priv->ready = true;
+
+	/* Complete PMU initialization by initializing WPR region */
+	pmu->nv_func->acr->init_wpr_region(pmu);
+
+	return 0;
+}
+
+static void
+gm200_pmu_recv(struct nvkm_pmu *pmu)
+{
+	struct gm200_pmu *priv = gm200_pmu(pmu);
+	/*
+	 * We are invoked from a worker thread, so normally we have plenty of
+	 * stack space to work with.
+	 */
+	u8 msg_buffer[PMU_MSG_BUF_SIZE];
+	struct pmu_hdr *hdr = (void *)msg_buffer;
+
+	mutex_lock(&priv->isr_mutex);
+
+	if (!priv->ready) {
+		gm200_pmu_handle_init_msg(pmu, hdr);
+	} else {
+		while (pmu_msg_read(priv, &priv->queue[PMU_MESSAGE_QUEUE], hdr))
+			pmu_msg_handle(priv, hdr);
+	}
+
+	mutex_unlock(&priv->isr_mutex);
+}
+
+static int
+gm200_pmu_init(struct nvkm_pmu *pmu)
+{
+	struct gm200_pmu *priv = gm200_pmu(pmu);
+
+	priv->ready = false;
+	reinit_completion(&priv->init_done);
+
+	return 0;
+}
+
+static const struct nvkm_pmu_func
+gm200_pmu = {
+	.init = gm200_pmu_init,
+	.intr = gt215_pmu_intr,
+	.recv = gm200_pmu_recv,
+};
+
+int
+gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	struct gm200_pmu *priv;
+	int i;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	*ppmu = &priv->base;
+
+	nvkm_pmu_ctor(&gm200_pmu, device, index, &priv->base);
+
+	mutex_init(&priv->isr_mutex);
+	mutex_init(&priv->seq_lock);
+	mutex_init(&priv->copy_lock);
+
+	for (i = 0; i < GM200_PMU_NUM_SEQUENCES; i++)
+		priv->seq[i].id = i;
+
+	init_completion(&priv->init_done);
+
+	return 0;
+}
diff --git a/drm/nouveau/nvkm/subdev/pmu/gm200.h b/drm/nouveau/nvkm/subdev/pmu/gm200.h
new file mode 100644
index 000000000000..7b5d2e88b379
--- /dev/null
+++ b/drm/nouveau/nvkm/subdev/pmu/gm200.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __NVKM_PMU_GM200_H__
+#define __NVKM_PMU_GM200_H__
+
+#include "priv.h"
+#include "nv_pmu.h"
+
+enum gm200_seq_state {
+	SEQ_STATE_FREE = 0,
+	SEQ_STATE_PENDING,
+	SEQ_STATE_USED,
+	SEQ_STATE_CANCELLED
+};
+
+struct gm200_pmu_sequence {
+	u16 id;
+	enum gm200_seq_state state;
+	struct pmu_hdr *msg;
+	nv_pmu_callback callback;
+	struct completion *completion;
+};
+
+/**
+ * Structure gm200_pmu_queue
+ * mutex              - protects the SW (LPQ/HPQ) command queues from concurrent writes
+ * id                 - logical queue identifier
+ * index              - physical queue index
+ * offset             - physical DMEM offset where this queue begins
+ * size               - size of the queue, in bytes
+ * position           - current read/write position
+ * oflag              - flag to identify open mode
+ */
+struct gm200_pmu_queue {
+	struct mutex mutex;
+	u32 id;
+	u32 index;
+	u32 offset;
+	u32 size;
+	u32 position;
+	enum {
+		OFLAG_CLOSED = 0,
+		OFLAG_READ,
+		OFLAG_WRITE,
+	} oflag;
+};
+
+#define GM200_PMU_QUEUE_COUNT		5
+#define GM200_PMU_NUM_SEQUENCES		256
+
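+/**
+ * Structure gm200_pmu
+ * base               - generic PMU structure
+ * ready              - set once the PMU's INIT message has been processed
+ * init_done          - completion signaled once PMU initialization is done
+ * isr_mutex          - serializes message processing
+ * seq_lock           - protects the sequence allocation table
+ * copy_lock          - serializes DMEM copies
+ * queue              - command/message queues, filled from the INIT message
+ * seq                - sequences used to track in-flight commands
+ * seq_tbl            - bitmap of sequence ids currently in use
+ */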
+struct gm200_pmu {
+	struct nvkm_pmu base;
+	bool ready;
+	struct completion init_done;
+	struct mutex isr_mutex;
+	struct mutex seq_lock;
+	struct mutex copy_lock;
+	struct gm200_pmu_queue queue[GM200_PMU_QUEUE_COUNT];
+	struct gm200_pmu_sequence seq[GM200_PMU_NUM_SEQUENCES];
+	unsigned long seq_tbl[BITS_TO_LONGS(GM200_PMU_NUM_SEQUENCES)];
+};
+#define gm200_pmu(ptr) container_of(ptr, struct gm200_pmu, base)
+
+/**
+ * Structure pmu_hdr  - header common to PMU commands (sent) and messages (received).
+ * unit_id            - PMU unit the command is sent to or the message comes from.
+ * size               - total size of the command or message, in bytes.
+ * ctrl_flags         - flags indicating the type of command/message.
+ * seq_id             - sequence id used to match a PMU message to its command.
+ */
+struct pmu_hdr {
+	u8 unit_id;
+	u8 size;
+	u8 ctrl_flags;
+	u8 seq_id;
+};
+
+struct pmu_msg_base {
+	struct pmu_hdr hdr;
+	u8 msg_type;
+};
+
+#endif
diff --git a/drm/nouveau/nvkm/subdev/pmu/nv_pmu.h b/drm/nouveau/nvkm/subdev/pmu/nv_pmu.h
new file mode 100644
index 000000000000..4267952231f7
--- /dev/null
+++ b/drm/nouveau/nvkm/subdev/pmu/nv_pmu.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __NVKM_PMU_NV_PMU_H__
+#define __NVKM_PMU_NV_PMU_H__
+
+typedef void (*nv_pmu_callback)(struct nvkm_pmu *, struct pmu_hdr *);
+
+/* Units we can communicate with using the PMU interface */
+enum nv_pmu_unit {
+	PMU_UNIT_REWIND = 0x00,
+	PMU_UNIT_INIT = 0x07,
+	PMU_UNIT_ACR = 0x0a,
+	PMU_UNIT_END = 0x23,
+	PMU_UNIT_TEST_START = 0xfe,
+};
+
+/* Queues identifiers */
+enum nv_pmu_queue {
+	/* High Priority Command Queue for Host -> PMU communication */
+	PMU_COMMAND_QUEUE_HPQ = 0,
+	/* Low Priority Command Queue for Host -> PMU communication */
+	PMU_COMMAND_QUEUE_LPQ = 1,
+	/* Message queue for PMU -> Host communication */
+	PMU_MESSAGE_QUEUE = 4,
+};
+
+int nv_pmu_cmd_post(struct nvkm_pmu *, struct pmu_hdr *, struct pmu_hdr *,
+		    enum nv_pmu_queue, nv_pmu_callback, struct completion *);
+
+#endif
diff --git a/drm/nouveau/nvkm/subdev/pmu/priv.h b/drm/nouveau/nvkm/subdev/pmu/priv.h
index 12b81ae1b114..b93b300a101a 100644
--- a/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -9,6 +9,8 @@ void nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, int,
 int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
 		  int index, struct nvkm_pmu **);
 
+struct pmu_hdr;
+
 struct nvkm_pmu_func {
 	struct {
 		u32 *data;
@@ -37,5 +39,21 @@ void gt215_pmu_intr(struct nvkm_pmu *);
 void gt215_pmu_recv(struct nvkm_pmu *);
 int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
 
+struct nv_pmu_init_func {
+	u32 cmdline_size;
+	void (*gen_cmdline)(struct nvkm_pmu *, void *buf);
+	int (*init_callback)(struct nvkm_pmu *, struct pmu_hdr *);
+};
+
+struct nv_pmu_acr_func {
+	int (*init_wpr_region)(struct nvkm_pmu *pmu);
+	int (*boot_falcon)(struct nvkm_pmu *, enum nvkm_falconidx);
+};
+
+struct nv_pmu_func {
+	const struct nv_pmu_init_func *init;
+	const struct nv_pmu_acr_func *acr;
+};
+
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
 #endif
-- 
git-series 0.8.10
