[Mesa-dev] [PATCH libdrm 7/9] panfrost/midgard: Move draw_count into panfrost_job
Rohan Garg
rohan.garg at collabora.com
Wed Jun 12 11:24:37 UTC 2019
Refactor code to track draw_count per panfrost_job instead of per panfrost_context, so the counter is scoped to the job it describes.
---
src/gallium/drivers/panfrost/pan_context.c | 31 ++++++++++++----------
src/gallium/drivers/panfrost/pan_context.h | 2 --
src/gallium/drivers/panfrost/pan_job.c | 4 +--
src/gallium/drivers/panfrost/pan_job.h | 2 ++
4 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/src/gallium/drivers/panfrost/pan_context.c b/src/gallium/drivers/panfrost/pan_context.c
index eaceaa8725e..ced25cf4b82 100644
--- a/src/gallium/drivers/panfrost/pan_context.c
+++ b/src/gallium/drivers/panfrost/pan_context.c
@@ -580,10 +580,12 @@ panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+
/* Each draw call corresponds to two jobs, and the set-value job is first */
- int draw_job_index = 1 + (2 * ctx->draw_count) + 1;
+ int draw_job_index = 1 + (2 * job->draw_count) + 1;
- struct mali_job_descriptor_header job = {
+ struct mali_job_descriptor_header next_job = {
.job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
.job_index = draw_job_index + (is_tiler ? 1 : 0),
#ifdef __LP64__
@@ -605,25 +607,25 @@ panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
if (is_tiler) {
/* Tiler jobs depend on vertex jobs */
- job.job_dependency_index_1 = draw_job_index;
+ next_job.job_dependency_index_1 = draw_job_index;
/* Tiler jobs also depend on the previous tiler job */
- if (ctx->draw_count) {
- job.job_dependency_index_2 = draw_job_index - 1;
+ if (job->draw_count) {
+ next_job.job_dependency_index_2 = draw_job_index - 1;
/* Previous tiler job points to this tiler job */
- panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->draw_count - 1], transfer.gpu);
+ panfrost_link_job_pair(ctx->u_tiler_jobs[job->draw_count - 1], transfer.gpu);
} else {
/* The only vertex job so far points to first tiler job */
panfrost_link_job_pair(ctx->u_vertex_jobs[0], transfer.gpu);
}
} else {
- if (ctx->draw_count) {
+ if (job->draw_count) {
/* Previous vertex job points to this vertex job */
- panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->draw_count - 1], transfer.gpu);
+ panfrost_link_job_pair(ctx->u_vertex_jobs[job->draw_count - 1], transfer.gpu);
} else {
/* Have the first vertex job depend on the set value job */
- job.job_dependency_index_1 = ctx->u_set_value_job->job_index;
+ next_job.job_dependency_index_1 = ctx->u_set_value_job->job_index;
panfrost_link_job_pair(ctx->u_set_value_job, transfer.gpu);
}
}
@@ -1245,8 +1247,9 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
/* TODO: Expand the array? */
- if (ctx->draw_count >= MAX_DRAW_CALLS) {
+ if (job->draw_count >= MAX_DRAW_CALLS) {
DBG("Job buffer overflow, ignoring draw\n");
assert(0);
}
@@ -1255,7 +1258,7 @@ panfrost_queue_draw(struct panfrost_context *ctx)
panfrost_emit_for_draw(ctx, true);
/* We need a set_value job before any other draw jobs */
- if (ctx->draw_count == 0)
+ if (job->draw_count == 0)
panfrost_set_value_job(ctx);
struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
@@ -1264,7 +1267,7 @@ panfrost_queue_draw(struct panfrost_context *ctx)
struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
- ctx->draw_count++;
+ job->draw_count++;
}
/* The entire frame is in memory -- send it off to the kernel! */
@@ -1278,7 +1281,7 @@ panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
struct panfrost_screen *screen = pan_screen(gallium->screen);
/* Edge case if screen is cleared and nothing else */
- bool has_draws = ctx->draw_count > 0;
+ bool has_draws = job->draw_count > 0;
/* Workaround a bizarre lockup (a hardware errata?) */
if (!has_draws)
@@ -1323,7 +1326,7 @@ panfrost_flush(
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
/* Nothing to do! */
- if (!ctx->draw_count && !job->clear) return;
+ if (!job->draw_count && !job->clear) return;
/* Whether to stall the pipeline for immediately correct results */
bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;
diff --git a/src/gallium/drivers/panfrost/pan_context.h b/src/gallium/drivers/panfrost/pan_context.h
index 27bb92b8330..ee02edccfd3 100644
--- a/src/gallium/drivers/panfrost/pan_context.h
+++ b/src/gallium/drivers/panfrost/pan_context.h
@@ -153,8 +153,6 @@ struct panfrost_context {
* and tiler jobs, linked to the fragment job at the end. See the
* presentations for more information how this works */
- unsigned draw_count;
-
mali_ptr set_value_job;
struct mali_job_descriptor_header *u_set_value_job;
diff --git a/src/gallium/drivers/panfrost/pan_job.c b/src/gallium/drivers/panfrost/pan_job.c
index be2742a0dc5..3ae7450c189 100644
--- a/src/gallium/drivers/panfrost/pan_job.c
+++ b/src/gallium/drivers/panfrost/pan_job.c
@@ -148,7 +148,7 @@ panfrost_job_submit(struct panfrost_context *ctx, struct panfrost_job *job)
struct panfrost_screen *screen = pan_screen(gallium->screen);
int ret;
- bool has_draws = ctx->draw_count > 0;
+ bool has_draws = job->draw_count > 0;
bool is_scanout = panfrost_is_scanout(ctx);
if (!job)
@@ -160,7 +160,7 @@ panfrost_job_submit(struct panfrost_context *ctx, struct panfrost_job *job)
fprintf(stderr, "panfrost_job_submit failed: %d\n", ret);
/* Reset job counters */
- ctx->draw_count = 0;
+ job->draw_count = 0;
ctx->vertex_job_count = 0;
ctx->tiler_job_count = 0;
}
diff --git a/src/gallium/drivers/panfrost/pan_job.h b/src/gallium/drivers/panfrost/pan_job.h
index 2e7c0532341..472a02c53f6 100644
--- a/src/gallium/drivers/panfrost/pan_job.h
+++ b/src/gallium/drivers/panfrost/pan_job.h
@@ -57,6 +57,8 @@ struct panfrost_job {
/* BOs referenced -- will be used for flushing logic */
struct set *bos;
+
+ unsigned draw_count;
};
/* Functions for managing the above */
--
2.17.1
More information about the mesa-dev
mailing list