[Freedreno] [PATCH 3/4] drm/msm/A6xx: Implement preemption for A6XX targets
Jordan Crouse
jcrouse at codeaurora.org
Tue Apr 3 21:19:40 UTC 2018
On Mon, Feb 26, 2018 at 01:08:22PM +0530, Sharat Masetty wrote:
> This patch implements the preemption feature for A6xx targets, which
> allows the GPU to switch to a higher priority ringbuffer if one is
> ready. A6xx hardware supports multiple preemption granularities,
> ranging from coarse grained (ringbuffer level) to finer grained
> (draw-call or bin boundary level). This patch enables the basic
> ringbuffer-level preemption, with finer grained preemption support to
> follow.
Reviewed-by: Jordan Crouse <jcrouse at codeaurora.org>
> Signed-off-by: Sharat Masetty <smasetty at codeaurora.org>
> ---
> drivers/gpu/drm/msm/Makefile | 1 +
> drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 44 ++++
> drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 146 +++++++++++-
> drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 136 +++++++++++
> drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 383 ++++++++++++++++++++++++++++++
> 5 files changed, 708 insertions(+), 2 deletions(-)
> create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_preempt.c
>
> diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
> index 0b6e150..1978312 100644
> --- a/drivers/gpu/drm/msm/Makefile
> +++ b/drivers/gpu/drm/msm/Makefile
> @@ -13,6 +13,7 @@ msm-y := \
> adreno/a6xx_gpu.o \
> adreno/a6xx_gmu.o \
> adreno/a6xx_hfi.o \
> + adreno/a6xx_preempt.o \
> hdmi/hdmi.o \
> hdmi/hdmi_audio.o \
> hdmi/hdmi_bridge.o \
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> index 8d732e0..5c2a68a 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> @@ -1145,6 +1145,50 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
> iommu_domain_free(gmu->domain);
> }
>
> +#define A6XX_GMU_FENCED_WRITE_SLEEP_US 10 /* Sleep time between reads in us */
> +#define A6XX_GMU_FENCED_WRITE_TIMEOUT 600 /* Timeout in us */
> +int a6xx_gmu_fenced_write(struct a6xx_gpu *a6xx_gpu, unsigned int reg,
> + unsigned int value, unsigned int fence_mask)
> +{
> + struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
> + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
> + struct msm_gpu *gpu = &adreno_gpu->base;
> + unsigned int status;
> + ktime_t timeout = ktime_add_us(ktime_get(),
> + A6XX_GMU_FENCED_WRITE_TIMEOUT);
> +
> + /* Write to the GPU register */
> + gpu_write(gpu, reg, value);
> +
> + might_sleep_if(A6XX_GMU_FENCED_WRITE_SLEEP_US);
> + for (;;) {
> + status = gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS);
> + /*
> + * If no bits of the fence_mask are set in the status, then the
> + * write was successful
> + */
> + if (!(status & fence_mask))
> + return 0;
> +
> + if (ktime_compare(ktime_get(), timeout) > 0) {
> + /* Timed out, but check one last time */
> + status = gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS);
> + if (!(status & fence_mask))
> + return 0;
> +
> + break;
> + }
> +
> + usleep_range((A6XX_GMU_FENCED_WRITE_SLEEP_US >> 2) + 1,
> + A6XX_GMU_FENCED_WRITE_SLEEP_US);
> +
> + /* Try writing again */
> + gpu_write(gpu, reg, value);
> + }
> +
> + return -ETIMEDOUT;
> +}
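A note for readers following along: register writes can be silently dropped
while the GMU holds the AHB fence (e.g. around power collapse), and a dropped
write is flagged in GMU_AHB_FENCE_STATUS. The loop above is essentially an
open-coded poll-with-timeout that also re-issues the write on each iteration.
A hypothetical caller (the error handling here is illustrative, not part of
the patch) would look something like:

    if (a6xx_gmu_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL,
                    cntl, FENCE_STATUS_WRITEDROPPED1_MASK))
            /* The write never stuck - the GPU likely needs recovery */
            dev_err(gpu->dev->dev, "fenced write timed out\n");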
> +
> int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
> {
> struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> index c72434b..b1a80ec 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> @@ -151,6 +151,8 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
>
> static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> {
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> uint32_t wptr;
> unsigned long flags;
>
> @@ -167,16 +169,52 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> /* Make sure everything is posted before making a decision */
> mb();
>
> - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
> + /* Update HW if this is the current ring and we are not in preempt */
> + if (a6xx_gpu->cur_ring == ring && !a6xx_in_preempt(a6xx_gpu))
> + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
> }
>
> static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
> struct msm_file_private *ctx)
> {
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> struct msm_drm_private *priv = gpu->dev->dev_private;
> struct msm_ringbuffer *ring = submit->ring;
> + uint64_t scratch_dest = SCRATCH_USER_CTX_IOVA(ring->id, a6xx_gpu);
> unsigned int i;
>
> + /*
> + * If preemption is enabled, then set the pseudo register for the save
> + * sequence
> + */
> + if (gpu->nr_rings > 1) {
> + struct msm_gpu_submitqueue *queue = submit->queue;
> +
> + OUT_PKT7(ring, CP_SET_PSEUDO_REG, 6);
> +
> + /* privileged and non secure buffer save */
> + OUT_RING(ring, SAVE_REG_PRIV_NON_SECURE_SAVE_ADDR_ID);
> + OUT_RING(ring, lower_32_bits(a6xx_gpu->preempt_iova[ring->id]));
> + OUT_RING(ring, upper_32_bits(a6xx_gpu->preempt_iova[ring->id]));
> +
> + /* user context buffer save */
> + OUT_RING(ring, SAVE_REG_NON_PRIV_SAVE_ADDR_ID);
> + OUT_RING(ring, lower_32_bits(queue->bo_iova));
> + OUT_RING(ring, upper_32_bits(queue->bo_iova));
> +
> + /*
> + * Ask the CP to save the user context buffer's iova to a
> + * scratch memory region; this is needed in case the CP
> + * preempts this ring in the middle of this submit's IB list.
> + */
> + OUT_PKT7(ring, CP_MEM_WRITE, 4);
> + OUT_RING(ring, lower_32_bits(scratch_dest));
> + OUT_RING(ring, upper_32_bits(scratch_dest));
> + OUT_RING(ring, lower_32_bits(queue->bo_iova));
> + OUT_RING(ring, upper_32_bits(queue->bo_iova));
> + }
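For readers: the scratch region written here is nothing more than one u64
slot per ringbuffer, indexed by SCRATCH_USER_CTX_IOVA() (defined in
a6xx_gpu.h below). Conceptually (the struct name is mine, not in the patch):

    struct a6xx_scratch {
            u64 user_ctx_iova[MSM_GPU_MAX_RINGS];   /* one slot per ring */
    };

The trigger path reads the slot back to program the non-privileged restore
address, and the submit path clears it again after the IB list (see below),
so a preemption that lands outside an IB list presumably restores with a
NULL user context address, which the CP skips.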
> +
> /* Invalidate CCU depth and color */
> OUT_PKT7(ring, CP_EVENT_WRITE, 1);
> OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
> @@ -184,6 +222,14 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
> OUT_PKT7(ring, CP_EVENT_WRITE, 1);
> OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
>
> + if (gpu->nr_rings > 1) {
> + /*
> + * Indicate the beginning of the IB list; this will
> + * implicitly turn on preemption.
> + */
> + OUT_PKT7(ring, CP_SET_MARKER, 1);
> + OUT_RING(ring, 0xD);
> + }
> +
> /* Submit the commands */
> for (i = 0; i < submit->nr_cmds; i++) {
> switch (submit->cmd[i].type) {
> @@ -201,6 +247,14 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
> }
> }
>
> + if (gpu->nr_rings > 1) {
> + /*
> + * Indicate the end of the IB list; this will implicitly
> + * turn off preemption.
> + */
> + OUT_PKT7(ring, CP_SET_MARKER, 1);
> + OUT_RING(ring, 0xE);
> + }
> +
> /* Write the fence to the scratch register */
> OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
> OUT_RING(ring, submit->seqno);
> @@ -215,7 +269,38 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
> OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
> OUT_RING(ring, submit->seqno);
>
> + /* If preemption is enabled */
> + if (gpu->nr_rings > 1) {
> + /*
> + * Reset the scratch region as we are done with the
> + * IB list of this submission
> + */
> + OUT_PKT7(ring, CP_MEM_WRITE, 4);
> + OUT_RING(ring, lower_32_bits(scratch_dest));
> + OUT_RING(ring, upper_32_bits(scratch_dest));
> + OUT_RING(ring, 0x00);
> + OUT_RING(ring, 0x00);
> +
> + /* Yield the floor on command completion */
> + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
> +
> + /*
> + * If dword[2:1] are non-zero, they specify an address for
> + * the CP to write the value of dword[3] to when preemption
> + * completes. Write 0 to skip the write.
> + */
> + OUT_RING(ring, 0x00);
> + OUT_RING(ring, 0x00);
> + /* Data value - not used if the address above is 0 */
> + OUT_RING(ring, 0x01);
> + /* generate interrupt on preemption completion */
> + OUT_RING(ring, 0x00);
> + }
> +
> a6xx_flush(gpu, ring);
> +
> + /* Check to see if we need to start preemption */
> + a6xx_preempt_trigger(gpu);
> }
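Putting the whole function together, with nr_rings > 1 each submit now emits
roughly this sequence on the ring (my summary of the code above):

    CP_SET_PSEUDO_REG        save addresses (this ring's record, this queue's bo)
    CP_MEM_WRITE             publish queue->bo_iova to the scratch slot
    CP_EVENT_WRITE x2        CCU depth/color invalidate
    CP_SET_MARKER 0xD        begin IB list (preemption points enabled)
    IB packets               the actual commands from submit->cmd[] (existing code)
    CP_SET_MARKER 0xE        end IB list (preemption points disabled)
    scratch + fence writes   retire bookkeeping (existing code)
    CP_MEM_WRITE             clear the scratch slot
    CP_CONTEXT_SWITCH_YIELD  yield point at the end of the submit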
>
> static const struct {
> @@ -413,11 +498,54 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
> return 0;
> }
>
> +static int a6xx_preempt_start(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + struct msm_ringbuffer *ring = gpu->rb[0];
> +
> + if (gpu->nr_rings <= 1)
> + return 0;
> +
> + /* Turn CP protection off */
> + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
> + OUT_RING(ring, 0);
> +
> + OUT_PKT7(ring, CP_SET_PSEUDO_REG, 6);
> +
> + /* Privileged and non-secure */
> + OUT_RING(ring, SAVE_REG_PRIV_NON_SECURE_SAVE_ADDR_ID);
> + OUT_RING(ring, lower_32_bits(a6xx_gpu->preempt_iova[ring->id]));
> + OUT_RING(ring, upper_32_bits(a6xx_gpu->preempt_iova[ring->id]));
> +
> + /* Privileged and secure. We don't do secure yet, so reset to 0 */
> + OUT_RING(ring, SAVE_REG_PRIV_SECURE_SAVE_ADDR_ID);
> + OUT_RING(ring, 0x00);
> + OUT_RING(ring, 0x00);
> +
> + /* Turn CP protection back on */
> + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
> + OUT_RING(ring, 1);
> +
> + /* Yield the floor on command completion */
> + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
> + OUT_RING(ring, 0x00);
> + OUT_RING(ring, 0x00);
> + OUT_RING(ring, 0x00);
> + /* Generate interrupt on preemption completion */
> + OUT_RING(ring, 0x00);
> +
> + gpu->funcs->flush(gpu, ring);
> +
> + return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
> +}
> +
> #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
> A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
> A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
> A6XX_RBBM_INT_0_MASK_CP_IB2 | \
> A6XX_RBBM_INT_0_MASK_CP_IB1 | \
> + A6XX_RBBM_INT_0_MASK_CP_SW | \
> A6XX_RBBM_INT_0_MASK_CP_RB | \
> A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
> A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
> @@ -557,6 +685,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
> if (ret)
> goto out;
>
> + a6xx_preempt_hw_init(gpu);
> +
> ret = a6xx_ucode_init(gpu);
> if (ret)
> goto out;
> @@ -586,6 +716,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
> gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
> }
>
> + /* Last step - yield the ringbuffer */
> + a6xx_preempt_start(gpu);
> out:
> /*
> * Tell the GMU that we are done touching the GPU and it can start power
> @@ -731,8 +863,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
> if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
> dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
>
> - if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
> + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
> msm_gpu_retire(gpu);
> + a6xx_preempt_trigger(gpu);
> + }
> +
> + if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
> + a6xx_preempt_irq(gpu);
>
> return IRQ_HANDLED;
> }
> @@ -874,6 +1011,8 @@ static void a6xx_destroy(struct msm_gpu *gpu)
> .active_ring = a6xx_active_ring,
> .irq = a6xx_irq,
> .destroy = a6xx_destroy,
> + .submitqueue_setup = a6xx_preempt_submitqueue_setup,
> + .submitqueue_close = a6xx_preempt_submitqueue_close,
> #ifdef CONFIG_DEBUG_FS
> .show = a6xx_show,
> #endif
> @@ -923,5 +1062,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
> msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
> a6xx_fault_handler);
>
> + /* Set up the preemption specific bits and pieces for each ringbuffer */
> + a6xx_preempt_init(gpu);
> +
> return gpu;
> }
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
> index 06e93fd..aca1d7d 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
> @@ -28,12 +28,126 @@ struct a6xx_gpu {
> uint64_t sqe_iova;
>
> struct msm_ringbuffer *cur_ring;
> + struct msm_ringbuffer *next_ring;
> +
> + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
> + struct a6xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
> + uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
> +
> + atomic_t preempt_state;
> + struct timer_list preempt_timer;
> +
> + unsigned int preempt_level;
> + bool uses_gmem;
> + bool skip_save_restore;
> +
> + struct drm_gem_object *scratch_bo;
> + void *scratch_ptr;
> + uint64_t scratch_iova;
>
> struct a6xx_gmu gmu;
> };
>
> #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
>
> +#define SCRATCH_USER_CTX_IOVA(ring_id, a6xx_gpu) \
> + (a6xx_gpu->scratch_iova + (ring_id * sizeof(uint64_t)))
> +
> +/*
> + * In order to do lockless preemption we use a simple state machine to progress
> + * through the process.
> + *
> + * PREEMPT_NONE - No preemption in progress. Next state: START.
> + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
> + * states: TRIGGERED, NONE.
> + * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
> + * state: NONE.
> + * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
> + * states: FAULTED, PENDING.
> + * PREEMPT_FAULTED - A preemption timed out (never completed). This will
> + * trigger recovery. Next state: N/A.
> + * PREEMPT_PENDING - The preemption complete interrupt fired - the callback is
> + * checking the success of the operation. Next states: FAULTED, NONE.
> + */
> +
> +enum a6xx_preempt_state {
> + PREEMPT_NONE = 0,
> + PREEMPT_START,
> + PREEMPT_ABORT,
> + PREEMPT_TRIGGERED,
> + PREEMPT_FAULTED,
> + PREEMPT_PENDING,
> +};
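All transitions go through an atomic cmpxchg (try_preempt_state() in
a6xx_preempt.c below), which is what makes the scheme lockless: a racing
caller simply fails the transition and backs off. For example, this is the
guard at the top of a6xx_preempt_trigger() further down:

    /* Only one CPU wins NONE -> START; everyone else just returns */
    if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START))
            return;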
> +
> +/*
> + * ID values used by SET_PSEUDO_REG PM4 command. These determine which of the
> + * various internal CP registers to write to. Used in the save/restore
> + * preemption sequence.
> + */
> +enum a6xx_set_pseudo_register {
> + SAVE_REG_SMMU_INFO_ID = 0,
> + SAVE_REG_PRIV_NON_SECURE_SAVE_ADDR_ID = 1,
> + SAVE_REG_PRIV_SECURE_SAVE_ADDR_ID = 2,
> + SAVE_REG_NON_PRIV_SAVE_ADDR_ID = 3,
> + SAVE_REG_COUNTER_ID = 4,
> +};
> +
> +/*
> + * struct a6xx_preempt_record is a shared buffer between the microcode and the
> + * CPU to store the state for preemption. The record itself is much larger
> + * (2112k) but most of that is used by the CP for storage.
> + *
> + * There is a preemption record assigned per ringbuffer. When the CPU triggers a
> + * preemption, it fills out the record with the useful information (wptr, ring
> + * base, etc) and the microcode uses that information to set up the CP following
> + * the preemption. When a ring is switched out, the CP will save the ringbuffer
> + * state back to the record. In this way, once the records are properly set up
> + * the CPU can quickly switch back and forth between ringbuffers by only
> + * updating a few registers (often only the wptr).
> + *
> + * These are the CPU aware registers in the record:
> + * @magic: Must always be 0xAE399D6EUL
> + * @info: Type of the record - written 0 by the CPU, updated by the CP
> + * @errno: preemption error record
> + * @data: Data field in YIELD and SET_MARKER packets, written and used by CP
> + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
> + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
> + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
> + * @_pad: Reserved/padding
> + * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
> + * @rbase: Value of RB_BASE written by CPU, save/restored by CP
> + * @counter: GPU address of the storage area for the preemption counters
> + */
> +struct a6xx_preempt_record {
> + u32 magic;
> + u32 info;
> + u32 errno;
> + u32 data;
> + u32 cntl;
> + u32 rptr;
> + u32 wptr;
> + u32 _pad;
> + u64 rptr_addr;
> + u64 rbase;
> + u64 counter;
> +};
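Since the CP microcode consumes this layout directly, it may be worth pinning
the offsets at build time. A sketch of what that could look like (these
checks are my suggestion, not part of the patch; offsets computed from the
struct as posted):

    BUILD_BUG_ON(offsetof(struct a6xx_preempt_record, cntl) != 16);
    BUILD_BUG_ON(offsetof(struct a6xx_preempt_record, rptr_addr) != 32);
    BUILD_BUG_ON(offsetof(struct a6xx_preempt_record, counter) != 48);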
> +
> +#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL
> +
> +/*
> + * Even though the structure above is only a few bytes, we need a full 2112k to
> + * store the entire preemption record from the CP
> + */
> +#define A6XX_PREEMPT_RECORD_SIZE (2112 * 1024)
> +
> +/*
> + * The preemption counter block is a storage area for the values of the
> + * preemption counters that are saved immediately before context switch. We
> + * append it onto the end of the allocation for the preemption record.
> + */
> +#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)
> +
> +#define A6XX_PREEMPT_USER_RECORD_SIZE (192 * 1024)
> /*
> * Given a register and a count, return a value to program into
> * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
> @@ -66,4 +180,26 @@ struct a6xx_gpu {
> int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
> void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
>
> +int a6xx_gmu_fenced_write(struct a6xx_gpu *a6xx_gpu, unsigned int reg,
> + unsigned int value, unsigned int fence_mask);
> +
> +void a6xx_preempt_init(struct msm_gpu *gpu);
> +void a6xx_preempt_hw_init(struct msm_gpu *gpu);
> +void a6xx_preempt_trigger(struct msm_gpu *gpu);
> +void a6xx_preempt_irq(struct msm_gpu *gpu);
> +void a6xx_preempt_fini(struct msm_gpu *gpu);
> +int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
> + struct msm_gpu_submitqueue *queue);
> +void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
> + struct msm_gpu_submitqueue *queue);
> +
> +/* Return true if we are in a preempt state */
> +static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
> +{
> + int preempt_state = atomic_read(&a6xx_gpu->preempt_state);
> +
> + return !(preempt_state == PREEMPT_NONE ||
> + preempt_state == PREEMPT_ABORT);
> +}
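My reading of why ABORT is treated as "not in preemption" here: during an
abort the trigger path writes the wptr itself before flipping back to NONE,
so a concurrent a6xx_flush() poking REG_A6XX_CP_RB_WPTR can only repeat that
same write, whereas in TRIGGERED/PENDING the register may already belong to
the incoming ring, so flush has to stay away.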
> +
> #endif /* __A6XX_GPU_H__ */
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
> new file mode 100644
> index 0000000..60df6c5
> --- /dev/null
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
> @@ -0,0 +1,383 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
> +
> +#include "msm_gem.h"
> +#include "a6xx_gpu.h"
> +#include "a6xx_gmu.xml.h"
> +
> +/*
> + * Try to transition the preemption state from old to new. Return
> + * true on success or false if the original state wasn't 'old'
> + */
> +static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu,
> + enum a6xx_preempt_state old, enum a6xx_preempt_state new)
> +{
> + enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state,
> + old, new);
> +
> + return (cur == old);
> +}
> +
> +/*
> + * Force the preemption state to the specified state. This is used in cases
> + * where the current state is known and won't change
> + */
> +static inline void set_preempt_state(struct a6xx_gpu *gpu,
> + enum a6xx_preempt_state new)
> +{
> + /*
> + * preempt_state may be read by other cores trying to trigger a
> + * preemption or in the interrupt handler so barriers are needed
> + * before...
> + */
> + smp_mb__before_atomic();
> + atomic_set(&gpu->preempt_state, new);
> + /* ... and after */
> + smp_mb__after_atomic();
> +}
> +
> +/* Write the most recent wptr for the given ring into the hardware */
> +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> +{
> + unsigned long flags;
> + uint32_t wptr;
> +
> + if (!ring)
> + return;
> +
> + spin_lock_irqsave(&ring->lock, flags);
> + wptr = get_wptr(ring);
> + spin_unlock_irqrestore(&ring->lock, flags);
> +
> + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
> +}
> +
> +/* Return the highest priority ringbuffer with something in it */
> +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
> +{
> + unsigned long flags;
> + int i;
> +
> + for (i = 0; i < gpu->nr_rings; i++) {
> + bool empty;
> + struct msm_ringbuffer *ring = gpu->rb[i];
> +
> + spin_lock_irqsave(&ring->lock, flags);
> + empty = (get_wptr(ring) == ring->memptrs->rptr);
> + spin_unlock_irqrestore(&ring->lock, flags);
> +
> + if (!empty)
> + return ring;
> + }
> +
> + return NULL;
> +}
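Worth noting: get_next_ring() encodes the priority policy entirely in the
iteration order. As elsewhere in the msm driver, gpu->rb[0] is the highest
priority ring, so "highest priority ring with something in it" is simply the
first non-empty ring found.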
> +
> +static void a6xx_preempt_timer(unsigned long data)
> +{
> + struct a6xx_gpu *a6xx_gpu = (struct a6xx_gpu *) data;
> + struct msm_gpu *gpu = &a6xx_gpu->base.base;
> + struct drm_device *dev = gpu->dev;
> + struct msm_drm_private *priv = dev->dev_private;
> +
> + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
> + return;
> +
> + dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
> + queue_work(priv->wq, &gpu->recover_work);
> +}
> +
> +void a6xx_preempt_irq(struct msm_gpu *gpu)
> +{
> + uint32_t status;
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + struct drm_device *dev = gpu->dev;
> + struct msm_drm_private *priv = dev->dev_private;
> +
> + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
> + return;
> +
> + /* Delete the preemption watchdog timer */
> + del_timer(&a6xx_gpu->preempt_timer);
> +
> + /*
> + * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
> + * to zero before firing the interrupt, but there is a non-zero chance
> + * of a hardware condition or a software race that could set it again
> + * before we have a chance to finish. If that happens, log and go for
> + * recovery
> + */
> + status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL);
> + if (unlikely(status & 0x1)) {
> + set_preempt_state(a6xx_gpu, PREEMPT_FAULTED);
> + dev_err(dev->dev, "%s: Preemption failed to complete\n",
> + gpu->name);
> + queue_work(priv->wq, &gpu->recover_work);
> + return;
> + }
> +
> + a6xx_gpu->cur_ring = a6xx_gpu->next_ring;
> + a6xx_gpu->next_ring = NULL;
> +
> + update_wptr(gpu, a6xx_gpu->cur_ring);
> +
> + set_preempt_state(a6xx_gpu, PREEMPT_NONE);
> +}
> +
> +void a6xx_preempt_hw_init(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + int i;
> +
> + /* No preemption if we only have one ring */
> + if (gpu->nr_rings == 1)
> + return;
> +
> + for (i = 0; i < gpu->nr_rings; i++) {
> + a6xx_gpu->preempt[i]->wptr = 0;
> + a6xx_gpu->preempt[i]->rptr = 0;
> + a6xx_gpu->preempt[i]->info = 0;
> + a6xx_gpu->preempt[i]->data = 0;
> + a6xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
> + }
> +
> + /* Write a 0 to signal that we aren't switching pagetables */
> + gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
> + REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
> +
> + /* Enable the GMEM save/restore feature for preemption */
> + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
> +
> + /* Reset the preemption state */
> + set_preempt_state(a6xx_gpu, PREEMPT_NONE);
> +
> + /* Always come up on rb 0 */
> + a6xx_gpu->cur_ring = gpu->rb[0];
> +}
> +
> +#define FENCE_STATUS_WRITEDROPPED0_MASK 0x1
> +#define FENCE_STATUS_WRITEDROPPED1_MASK 0x2
> +void a6xx_preempt_trigger(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + unsigned long flags;
> + struct msm_ringbuffer *ring;
> + uint64_t user_ctx_iova;
> + unsigned int cntl;
> +
> + if (gpu->nr_rings == 1)
> + return;
> +
> + /*
> + * Try to start preemption by moving from NONE to START. If
> + * unsuccessful, a preemption is already in flight
> + */
> + if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START))
> + return;
> +
> + cntl = (((a6xx_gpu->preempt_level << 6) & 0xC0) |
> + ((a6xx_gpu->skip_save_restore << 9) & 0x200) |
> + ((a6xx_gpu->uses_gmem << 8) & 0x100) | 0x1);
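Decoding the magic numbers in cntl, from the shifts and masks above: bit 0 is
the preempt trigger ("stop") bit, bits 7:6 the preemption level, bit 8 the
uses-GMEM flag and bit 9 the skip-save/restore flag. If a respin happens,
named masks might read better; hypothetical names, not from any header:

    #define A6XX_CP_CTXSWITCH_CNTL_GO                BIT(0)
    #define A6XX_CP_CTXSWITCH_CNTL_LEVEL(x)          (((x) << 6) & 0xC0)
    #define A6XX_CP_CTXSWITCH_CNTL_USES_GMEM         BIT(8)
    #define A6XX_CP_CTXSWITCH_CNTL_SKIP_SAVE_RESTORE BIT(9)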
> +
> + /* Get the next ring to preempt to */
> + ring = get_next_ring(gpu);
> +
> + /*
> + * If no ring is populated or the highest priority ring is the
> + * current one, do nothing except update the wptr to the latest
> + * and greatest.
> + */
> + if (!ring || (a6xx_gpu->cur_ring == ring)) {
> + set_preempt_state(a6xx_gpu, PREEMPT_ABORT);
> + update_wptr(gpu, a6xx_gpu->cur_ring);
> + set_preempt_state(a6xx_gpu, PREEMPT_NONE);
> + return;
> + }
> +
> + spin_lock_irqsave(&ring->lock, flags);
> + a6xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
> + spin_unlock_irqrestore(&ring->lock, flags);
> +
> + /*
> + * The GPU power collapsing between the following preemption register
> + * writes can lead to a prolonged preemption trigger sequence, so set a
> + * keepalive bit to make sure the GPU is not power collapsed by the GMU
> + * during this time. The first fenced write will wake up the GPU (if it
> + * was power collapsed) and from there on it will not be power collapsed
> + * again until we close the keepalive window by clearing the keepalive
> + * bit.
> + */
> + gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);
> +
> + a6xx_gmu_fenced_write(a6xx_gpu,
> + REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
> + lower_32_bits(a6xx_gpu->preempt_iova[ring->id]),
> + FENCE_STATUS_WRITEDROPPED1_MASK);
> +
> + a6xx_gmu_fenced_write(a6xx_gpu,
> + REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
> + upper_32_bits(a6xx_gpu->preempt_iova[ring->id]),
> + FENCE_STATUS_WRITEDROPPED1_MASK);
> +
> + /*
> + * Use the user context iova from the scratch memory that the CP may
> + * have written as part of the ring switch out.
> + */
> + user_ctx_iova = *((uint64_t *)a6xx_gpu->scratch_ptr + ring->id);
> +
> + a6xx_gmu_fenced_write(a6xx_gpu,
> + REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
> + lower_32_bits(user_ctx_iova),
> + FENCE_STATUS_WRITEDROPPED1_MASK);
> +
> + a6xx_gmu_fenced_write(a6xx_gpu,
> + REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
> + upper_32_bits(user_ctx_iova),
> + FENCE_STATUS_WRITEDROPPED1_MASK);
> +
> + a6xx_gpu->next_ring = ring;
> +
> + /* Start a timer to catch a stuck preemption */
> + mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
> +
> + /* Set the preemption state to triggered */
> + set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
> +
> + /* Make sure everything is written before hitting the button */
> + wmb();
> +
> + /* Trigger the preemption */
> + a6xx_gmu_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl,
> + FENCE_STATUS_WRITEDROPPED1_MASK);
> +
> + /* Close the GPU keepalive window */
> + gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);
> +}
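Condensing the trigger sequence for reviewers: (1) NONE -> START via cmpxchg;
(2) pick the target ring, or ABORT -> NONE after a plain wptr update; (3)
publish the target's wptr into its preempt record; (4) open the GMU keepalive
window so the GPU cannot power collapse mid-sequence; (5) fenced writes of
the privileged and user restore addresses; (6) arm the 10 second watchdog and
move START -> TRIGGERED; (7) fenced write to CP_CONTEXT_SWITCH_CNTL actually
fires the preemption; (8) close the keepalive window. From there the exit
path is either the CP_SW interrupt (a6xx_preempt_irq(), TRIGGERED -> PENDING
-> NONE) or the watchdog (TRIGGERED -> FAULTED -> recovery).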
> +
> +static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
> + struct msm_ringbuffer *ring)
> +{
> + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
> + struct msm_gpu *gpu = &adreno_gpu->base;
> + struct a6xx_preempt_record *ptr;
> + struct drm_gem_object *bo = NULL;
> + u64 iova = 0;
> +
> + ptr = msm_gem_kernel_new(gpu->dev,
> + A6XX_PREEMPT_RECORD_SIZE + A6XX_PREEMPT_COUNTER_SIZE,
> + MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
> +
> + if (IS_ERR(ptr))
> + return PTR_ERR(ptr);
> +
> + a6xx_gpu->preempt_bo[ring->id] = bo;
> + a6xx_gpu->preempt_iova[ring->id] = iova;
> + a6xx_gpu->preempt[ring->id] = ptr;
> +
> + /* Set up the defaults on the preemption record */
> + ptr->magic = A6XX_PREEMPT_RECORD_MAGIC;
> + ptr->info = 0;
> + ptr->data = 0;
> + ptr->rptr = 0;
> + ptr->wptr = 0;
> + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
> + ptr->rptr_addr = rbmemptr(ring, rptr);
> + ptr->rbase = ring->iova;
> + ptr->counter = iova + A6XX_PREEMPT_RECORD_SIZE;
> +
> + return 0;
> +}
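The single BO allocated above is carved up as record-then-counters, roughly:

    iova                                  iova + 2112K       iova + 2112K + 64
    |-- a6xx_preempt_record + CP storage --|-- preempt counters (16 x u32) --|

with ptr->counter pointing the CP at the 64-byte tail region.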
> +
> +void a6xx_preempt_fini(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + int i;
> +
> + for (i = 0; i < gpu->nr_rings; i++) {
> + if (!a6xx_gpu->preempt_bo[i])
> + continue;
> +
> + msm_gem_put_vaddr(a6xx_gpu->preempt_bo[i]);
> +
> + if (a6xx_gpu->preempt_iova[i])
> + msm_gem_put_iova(a6xx_gpu->preempt_bo[i], gpu->aspace);
> +
> + drm_gem_object_unreference(a6xx_gpu->preempt_bo[i]);
> + a6xx_gpu->preempt_bo[i] = NULL;
> + }
> +}
> +
> +void a6xx_preempt_init(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + int i;
> +
> + /* No preemption if we only have one ring */
> + if (gpu->nr_rings <= 1)
> + return;
> +
> + for (i = 0; i < gpu->nr_rings; i++) {
> + if (preempt_init_ring(a6xx_gpu, gpu->rb[i]))
> + goto fail;
> + }
> +
> + /* TODO: make this configurable? */
> + a6xx_gpu->preempt_level = 0;
> + a6xx_gpu->uses_gmem = 1;
> + a6xx_gpu->skip_save_restore = 0;
> +
> + a6xx_gpu->scratch_ptr = msm_gem_kernel_new(gpu->dev,
> + gpu->nr_rings * sizeof(uint64_t), MSM_BO_WC,
> + gpu->aspace, &a6xx_gpu->scratch_bo,
> + &a6xx_gpu->scratch_iova);
> +
> + if (IS_ERR(a6xx_gpu->scratch_ptr))
> + goto fail;
> +
> + setup_timer(&a6xx_gpu->preempt_timer, a6xx_preempt_timer,
> + (unsigned long) a6xx_gpu);
> +
> + return;
> +fail:
> + /*
> + * On any failure our adventure is over. Clean up and
> + * set nr_rings to 1 to force preemption off
> + */
> + a6xx_preempt_fini(gpu);
> + gpu->nr_rings = 1;
> +
> + return;
> +}
> +
> +void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
> + struct msm_gpu_submitqueue *queue)
> +{
> + if (!queue->bo)
> + return;
> +
> + msm_gem_put_iova(queue->bo, gpu->aspace);
> + msm_gem_put_vaddr(queue->bo);
> + drm_gem_object_unreference_unlocked(queue->bo);
> +}
> +
> +int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
> + struct msm_gpu_submitqueue *queue)
> +{
> + void *ptr;
> +
> + /*
> + * Create a per-submitqueue buffer for the CP to save and restore
> + * user-specific information such as the VPC streamout data.
> + */
> + ptr = msm_gem_kernel_new(gpu->dev, A6XX_PREEMPT_USER_RECORD_SIZE,
> + MSM_BO_WC, gpu->aspace, &queue->bo, &queue->bo_iova);
> +
> + if (IS_ERR(ptr))
> + return PTR_ERR(ptr);
> +
> + return 0;
> +}
> --
> 1.9.1
>
> _______________________________________________
> Freedreno mailing list
> Freedreno at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/freedreno
--
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project