[Intel-gfx] [drm-intel:drm-intel-next-queued 5/5] drivers/gpu/drm/i915/gt/intel_lrc.c:2335:16: error: unused variable 'regs'

kbuild test robot lkp at intel.com
Thu Feb 13 05:00:55 UTC 2020


tree:   git://anongit.freedesktop.org/drm-intel drm-intel-next-queued
head:   c616d2387aeeb987f03eee848f04ffdc248c7aae
commit: c616d2387aeeb987f03eee848f04ffdc248c7aae [5/5] drm/i915/gt: Expand bad CS completion event debug
config: i386-randconfig-h003-20200213 (attached as .config)
compiler: gcc-7 (Debian 7.5.0-4) 7.5.0
reproduce:
        git checkout c616d2387aeeb987f03eee848f04ffdc248c7aae
        # save the attached .config to linux build tree
        make ARCH=i386 

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   drivers/gpu/drm/i915/gt/intel_lrc.c: In function 'process_csb':
>> drivers/gpu/drm/i915/gt/intel_lrc.c:2335:16: error: unused variable 'regs' [-Werror=unused-variable]
        const u32 *regs = rq->context->lrc_reg_state;
                   ^~~~
   cc1: all warnings being treated as errors

vim +/regs +2335 drivers/gpu/drm/i915/gt/intel_lrc.c

  2222	
  2223	static void process_csb(struct intel_engine_cs *engine)
  2224	{
  2225		struct intel_engine_execlists * const execlists = &engine->execlists;
  2226		const u32 * const buf = execlists->csb_status;
  2227		const u8 num_entries = execlists->csb_size;
  2228		u8 head, tail;
  2229	
  2230		/*
  2231		 * As we modify our execlists state tracking we require exclusive
  2232		 * access. Either we are inside the tasklet, or the tasklet is disabled
  2233		 * and we assume that is only inside the reset paths and so serialised.
  2234		 */
  2235		GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
  2236			   !reset_in_progress(execlists));
  2237		GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
  2238	
  2239		/*
  2240		 * Note that csb_write, csb_status may be either in HWSP or mmio.
  2241		 * When reading from the csb_write mmio register, we have to be
  2242		 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
  2243		 * the low 4bits. As it happens we know the next 4bits are always
  2244		 * zero and so we can simply mask off the low u8 of the register
  2245		 * and treat it identically to reading from the HWSP (without having
  2246		 * to use explicit shifting and masking, and probably bifurcating
  2247		 * the code to handle the legacy mmio read).
  2248		 */
  2249		head = execlists->csb_head;
  2250		tail = READ_ONCE(*execlists->csb_write);
  2251		if (unlikely(head == tail))
  2252			return;
  2253	
  2254		/*
  2255		 * Hopefully paired with a wmb() in HW!
  2256		 *
  2257		 * We must complete the read of the write pointer before any reads
  2258		 * from the CSB, so that we do not see stale values. Without an rmb
  2259		 * (lfence) the HW may speculatively perform the CSB[] reads *before*
  2260		 * we perform the READ_ONCE(*csb_write).
  2261		 */
  2262		rmb();
  2263	
  2264		ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
  2265		do {
  2266			bool promote;
  2267	
  2268			if (++head == num_entries)
  2269				head = 0;
  2270	
  2271			/*
  2272			 * We are flying near dragons again.
  2273			 *
  2274			 * We hold a reference to the request in execlist_port[]
  2275			 * but no more than that. We are operating in softirq
  2276			 * context and so cannot hold any mutex or sleep. That
  2277			 * prevents us stopping the requests we are processing
  2278			 * in port[] from being retired simultaneously (the
  2279			 * breadcrumb will be complete before we see the
  2280			 * context-switch). As we only hold the reference to the
  2281			 * request, any pointer chasing underneath the request
  2282			 * is subject to a potential use-after-free. Thus we
  2283			 * store all of the bookkeeping within port[] as
  2284			 * required, and avoid using unguarded pointers beneath
  2285			 * request itself. The same applies to the atomic
  2286			 * status notifier.
  2287			 */
  2288	
  2289			ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
  2290				     head, buf[2 * head + 0], buf[2 * head + 1]);
  2291	
  2292			if (INTEL_GEN(engine->i915) >= 12)
  2293				promote = gen12_csb_parse(execlists, buf + 2 * head);
  2294			else
  2295				promote = gen8_csb_parse(execlists, buf + 2 * head);
  2296			if (promote) {
  2297				struct i915_request * const *old = execlists->active;
  2298	
  2299				GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
  2300	
  2301				ring_set_paused(engine, 0);
  2302	
  2303				/* Point active to the new ELSP; prevent overwriting */
  2304				WRITE_ONCE(execlists->active, execlists->pending);
  2305	
  2306				/* cancel old inflight, prepare for switch */
  2307				trace_ports(execlists, "preempted", old);
  2308				while (*old)
  2309					execlists_schedule_out(*old++);
  2310	
  2311				/* switch pending to inflight */
  2312				WRITE_ONCE(execlists->active,
  2313					   memcpy(execlists->inflight,
  2314						  execlists->pending,
  2315						  execlists_num_ports(execlists) *
  2316						  sizeof(*execlists->pending)));
  2317	
  2318				WRITE_ONCE(execlists->pending[0], NULL);
  2319			} else {
  2320				GEM_BUG_ON(!*execlists->active);
  2321	
  2322				/* port0 completed, advanced to port1 */
  2323				trace_ports(execlists, "completed", execlists->active);
  2324	
  2325				/*
  2326				 * We rely on the hardware being strongly
  2327				 * ordered, that the breadcrumb write is
  2328				 * coherent (visible from the CPU) before the
  2329				 * user interrupt and CSB is processed.
  2330				 */
  2331				if (GEM_SHOW_DEBUG() &&
  2332				    !i915_request_completed(*execlists->active) &&
  2333				    !reset_in_progress(execlists)) {
  2334					struct i915_request *rq = *execlists->active;
> 2335					const u32 *regs = rq->context->lrc_reg_state;
  2336	
  2337					ENGINE_TRACE(engine,
  2338						     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
  2339						     ENGINE_READ(engine, RING_START),
  2340						     ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
  2341						     ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
  2342						     ENGINE_READ(engine, RING_CTL),
  2343						     ENGINE_READ(engine, RING_MI_MODE));
  2344					ENGINE_TRACE(engine,
  2345						     "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
  2346						     i915_ggtt_offset(rq->ring->vma),
  2347						     rq->head, rq->tail,
  2348						     rq->fence.context,
  2349						     lower_32_bits(rq->fence.seqno),
  2350						     hwsp_seqno(rq));
  2351					ENGINE_TRACE(engine,
  2352						     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
  2353						     regs[CTX_RING_START],
  2354						     regs[CTX_RING_HEAD],
  2355						     regs[CTX_RING_TAIL]);
  2356	
  2357					GEM_BUG_ON("context completed before request");
  2358				}
  2359	
  2360				execlists_schedule_out(*execlists->active++);
  2361	
  2362				GEM_BUG_ON(execlists->active - execlists->inflight >
  2363					   execlists_num_ports(execlists));
  2364			}
  2365		} while (head != tail);
  2366	
  2367		execlists->csb_head = head;
  2368		set_timeslice(engine);
  2369	
  2370		/*
  2371		 * Gen11 has proven to fail wrt global observation point between
  2372		 * entry and tail update, failing on the ordering and thus
  2373		 * we see an old entry in the context status buffer.
  2374		 *
  2375		 * Forcibly evict out entries for the next gpu csb update,
  2376		 * to increase the odds that we get fresh entries with
  2377		 * non-working hardware. The cost of doing so comes out mostly
  2378		 * in the wash as hardware, working or not, will need to do the
  2379		 * invalidation before.
  2380		 */
  2381		invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
  2382	}
  2383	
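An alternative sketch under the same assumption (that the ENGINE_TRACE()
arguments vanish when tracing is compiled out): drop the local declared at
line 2335 and index lrc_reg_state directly at the point of use, so nothing is
left behind in non-trace builds. Again purely illustrative:

			ENGINE_TRACE(engine,
				     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
				     rq->context->lrc_reg_state[CTX_RING_START],
				     rq->context->lrc_reg_state[CTX_RING_HEAD],
				     rq->context->lrc_reg_state[CTX_RING_TAIL]);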

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
-------------- next part --------------
A non-text attachment was scrubbed...
Name: .config.gz
Type: application/gzip
Size: 32775 bytes
Desc: not available
URL: <https://lists.freedesktop.org/archives/intel-gfx/attachments/20200213/12cb8f11/attachment-0001.gz>

