[gvt-linux:gvt-staging 1/3] drivers/gpu/drm/i915/gvt/scheduler.c:230:2: note: in expansion of macro 'if'
kbuild test robot
fengguang.wu@intel.com
Thu Feb 22 08:39:41 UTC 2018
tree: https://github.com/01org/gvt-linux.git gvt-staging
head: af729c04a93be3d6a26a17040c5848fa51a24220
commit: 18feb77e487fd7bdde66e46945d004b7bc8effba [1/3] Merge remote-tracking branch 'origin/gvt-next' into gvt-staging
config: x86_64-randconfig-s0-02221347 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
git checkout 18feb77e487fd7bdde66e46945d004b7bc8effba
# save the attached .config to linux build tree
make ARCH=x86_64
All warnings (new ones prefixed by >>):
drivers/gpu/drm/i915/gvt/scheduler.c: In function 'copy_workload_to_ring_buffer':
drivers/gpu/drm/i915/gvt/scheduler.c:228:37: error: initialization from incompatible pointer type [-Werror=incompatible-pointer-types]
struct drm_i915_gem_request *req = workload->req;
^~~~~~~~
In file included from include/linux/err.h:5:0,
from include/linux/kthread.h:5,
from drivers/gpu/drm/i915/gvt/scheduler.c:36:
drivers/gpu/drm/i915/gvt/scheduler.c:230:21: error: dereferencing pointer to incomplete type 'struct drm_i915_gem_request'
if (IS_KABYLAKE(req->i915) &&
^
include/linux/compiler.h:58:30: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^~~~
>> drivers/gpu/drm/i915/gvt/scheduler.c:230:2: note: in expansion of macro 'if'
if (IS_KABYLAKE(req->i915) &&
^~
>> drivers/gpu/drm/i915/i915_drv.h:2619:31: note: in expansion of macro 'IS_PLATFORM'
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
^~~~~~~~~~~
drivers/gpu/drm/i915/gvt/scheduler.c:230:6: note: in expansion of macro 'IS_KABYLAKE'
if (IS_KABYLAKE(req->i915) &&
^~~~~~~~~~~
cc1: some warnings being treated as errors
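
Both diagnostics point at the same root cause: the merged 'origin/gvt-next' hunk at line 228 still uses the old request type, while the base tree already carries commit e61e0f51b (Chris Wilson, 2018-02-21), which shows up at lines 129 and 151 of the listing below already using 'struct i915_request'. With 'struct drm_i915_gem_request' no longer defined anywhere, the initializer at line 228 mismatches the type of 'workload->req', and every subsequent 'req->' dereference hits an incomplete type. A minimal sketch of the probable fix, assuming the type rename is the only change required (not a verified patch):

	/*
	 * Sketch against the listing below: switch
	 * copy_workload_to_ring_buffer() to the renamed request type
	 * already used by is_gvt_request() at line 129.
	 */
	struct i915_request *req = workload->req;	/* was: struct drm_i915_gem_request * */

	if (IS_KABYLAKE(req->i915) &&
	    is_inhibit_context(req->ctx, req->engine->id))
		intel_vgpu_restore_inhibit_context(vgpu, req);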
vim +/if +230 drivers/gpu/drm/i915/gvt/scheduler.c
e47340578 Zhi Wang 2016-05-01 @36 #include <linux/kthread.h>
e47340578 Zhi Wang 2016-05-01 37
feddf6e86 Zhenyu Wang 2016-10-20 38 #include "i915_drv.h"
feddf6e86 Zhenyu Wang 2016-10-20 39 #include "gvt.h"
feddf6e86 Zhenyu Wang 2016-10-20 40
e47340578 Zhi Wang 2016-05-01 41 #define RING_CTX_OFF(x) \
e47340578 Zhi Wang 2016-05-01 42 offsetof(struct execlist_ring_context, x)
e47340578 Zhi Wang 2016-05-01 43
999ccb401 Du, Changbin 2016-10-20 44 static void set_context_pdp_root_pointer(
999ccb401 Du, Changbin 2016-10-20 45 struct execlist_ring_context *ring_context,
e47340578 Zhi Wang 2016-05-01 46 u32 pdp[8])
e47340578 Zhi Wang 2016-05-01 47 {
e47340578 Zhi Wang 2016-05-01 48 struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
e47340578 Zhi Wang 2016-05-01 49 int i;
e47340578 Zhi Wang 2016-05-01 50
e47340578 Zhi Wang 2016-05-01 51 for (i = 0; i < 8; i++)
e47340578 Zhi Wang 2016-05-01 52 pdp_pair[i].val = pdp[7 - i];
e47340578 Zhi Wang 2016-05-01 53 }
e47340578 Zhi Wang 2016-05-01 54
e47340578 Zhi Wang 2016-05-01 55 static int populate_shadow_context(struct intel_vgpu_workload *workload)
e47340578 Zhi Wang 2016-05-01 56 {
e47340578 Zhi Wang 2016-05-01 57 struct intel_vgpu *vgpu = workload->vgpu;
e47340578 Zhi Wang 2016-05-01 58 struct intel_gvt *gvt = vgpu->gvt;
e47340578 Zhi Wang 2016-05-01 59 int ring_id = workload->ring_id;
1406a14b0 Zhi Wang 2017-09-10 60 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
e47340578 Zhi Wang 2016-05-01 61 struct drm_i915_gem_object *ctx_obj =
e47340578 Zhi Wang 2016-05-01 62 shadow_ctx->engine[ring_id].state->obj;
e47340578 Zhi Wang 2016-05-01 63 struct execlist_ring_context *shadow_ring_context;
e47340578 Zhi Wang 2016-05-01 64 struct page *page;
e47340578 Zhi Wang 2016-05-01 65 void *dst;
e47340578 Zhi Wang 2016-05-01 66 unsigned long context_gpa, context_page_num;
e47340578 Zhi Wang 2016-05-01 67 int i;
e47340578 Zhi Wang 2016-05-01 68
e47340578 Zhi Wang 2016-05-01 69 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
e47340578 Zhi Wang 2016-05-01 70 workload->ctx_desc.lrca);
e47340578 Zhi Wang 2016-05-01 71
63ffbcdad Joonas Lahtinen 2017-04-28 72 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
e47340578 Zhi Wang 2016-05-01 73
e47340578 Zhi Wang 2016-05-01 74 context_page_num = context_page_num >> PAGE_SHIFT;
e47340578 Zhi Wang 2016-05-01 75
e47340578 Zhi Wang 2016-05-01 76 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
e47340578 Zhi Wang 2016-05-01 77 context_page_num = 19;
e47340578 Zhi Wang 2016-05-01 78
e47340578 Zhi Wang 2016-05-01 79 i = 2;
e47340578 Zhi Wang 2016-05-01 80
e47340578 Zhi Wang 2016-05-01 81 while (i < context_page_num) {
e47340578 Zhi Wang 2016-05-01 82 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
e47340578 Zhi Wang 2016-05-01 83 (u32)((workload->ctx_desc.lrca + i) <<
9556e1188 Zhi Wang 2017-10-10 84 I915_GTT_PAGE_SHIFT));
e47340578 Zhi Wang 2016-05-01 85 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
695fbc08d Tina Zhang 2017-03-10 86 gvt_vgpu_err("Invalid guest context descriptor\n");
5c56883a9 fred gao 2017-09-20 87 return -EFAULT;
e47340578 Zhi Wang 2016-05-01 88 }
e47340578 Zhi Wang 2016-05-01 89
0b29c75a0 Michel Thierry 2017-09-13 90 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
c754936fe Xiaoguang Chen 2016-11-03 91 dst = kmap(page);
e47340578 Zhi Wang 2016-05-01 92 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
9556e1188 Zhi Wang 2017-10-10 93 I915_GTT_PAGE_SIZE);
c754936fe Xiaoguang Chen 2016-11-03 94 kunmap(page);
e47340578 Zhi Wang 2016-05-01 95 i++;
e47340578 Zhi Wang 2016-05-01 96 }
e47340578 Zhi Wang 2016-05-01 97
e47340578 Zhi Wang 2016-05-01 98 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
c754936fe Xiaoguang Chen 2016-11-03 99 shadow_ring_context = kmap(page);
e47340578 Zhi Wang 2016-05-01 100
e47340578 Zhi Wang 2016-05-01 101 #define COPY_REG(name) \
e47340578 Zhi Wang 2016-05-01 102 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
e47340578 Zhi Wang 2016-05-01 103 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
e47340578 Zhi Wang 2016-05-01 104
e47340578 Zhi Wang 2016-05-01 105 COPY_REG(ctx_ctrl);
e47340578 Zhi Wang 2016-05-01 106 COPY_REG(ctx_timestamp);
e47340578 Zhi Wang 2016-05-01 107
e47340578 Zhi Wang 2016-05-01 108 if (ring_id == RCS) {
e47340578 Zhi Wang 2016-05-01 109 COPY_REG(bb_per_ctx_ptr);
e47340578 Zhi Wang 2016-05-01 110 COPY_REG(rcs_indirect_ctx);
e47340578 Zhi Wang 2016-05-01 111 COPY_REG(rcs_indirect_ctx_offset);
e47340578 Zhi Wang 2016-05-01 112 }
e47340578 Zhi Wang 2016-05-01 113 #undef COPY_REG
e47340578 Zhi Wang 2016-05-01 114
e47340578 Zhi Wang 2016-05-01 115 set_context_pdp_root_pointer(shadow_ring_context,
f4d1a5a4c Changbin Du 2018-01-30 116 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
e47340578 Zhi Wang 2016-05-01 117
e47340578 Zhi Wang 2016-05-01 118 intel_gvt_hypervisor_read_gpa(vgpu,
e47340578 Zhi Wang 2016-05-01 119 workload->ring_context_gpa +
e47340578 Zhi Wang 2016-05-01 120 sizeof(*shadow_ring_context),
e47340578 Zhi Wang 2016-05-01 121 (void *)shadow_ring_context +
e47340578 Zhi Wang 2016-05-01 122 sizeof(*shadow_ring_context),
9556e1188 Zhi Wang 2017-10-10 123 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
e47340578 Zhi Wang 2016-05-01 124
c754936fe Xiaoguang Chen 2016-11-03 125 kunmap(page);
e47340578 Zhi Wang 2016-05-01 126 return 0;
e47340578 Zhi Wang 2016-05-01 127 }
e47340578 Zhi Wang 2016-05-01 128
e61e0f51b Chris Wilson 2018-02-21 129 static inline bool is_gvt_request(struct i915_request *req)
bc2d4b62d Changbin Du 2017-03-22 130 {
bc2d4b62d Changbin Du 2017-03-22 131 return i915_gem_context_force_single_submission(req->ctx);
bc2d4b62d Changbin Du 2017-03-22 132 }
bc2d4b62d Changbin Du 2017-03-22 133
295764cd2 Xiong Zhang 2017-11-07 134 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
295764cd2 Xiong Zhang 2017-11-07 135 {
295764cd2 Xiong Zhang 2017-11-07 136 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
295764cd2 Xiong Zhang 2017-11-07 137 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
295764cd2 Xiong Zhang 2017-11-07 138 i915_reg_t reg;
295764cd2 Xiong Zhang 2017-11-07 139
295764cd2 Xiong Zhang 2017-11-07 140 reg = RING_INSTDONE(ring_base);
295764cd2 Xiong Zhang 2017-11-07 141 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
295764cd2 Xiong Zhang 2017-11-07 142 reg = RING_ACTHD(ring_base);
295764cd2 Xiong Zhang 2017-11-07 143 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
295764cd2 Xiong Zhang 2017-11-07 144 reg = RING_ACTHD_UDW(ring_base);
295764cd2 Xiong Zhang 2017-11-07 145 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
295764cd2 Xiong Zhang 2017-11-07 146 }
295764cd2 Xiong Zhang 2017-11-07 147
e47340578 Zhi Wang 2016-05-01 148 static int shadow_context_status_change(struct notifier_block *nb,
e47340578 Zhi Wang 2016-05-01 149 unsigned long action, void *data)
e47340578 Zhi Wang 2016-05-01 150 {
e61e0f51b Chris Wilson 2018-02-21 151 struct i915_request *req = data;
3fc03069b Changbin Du 2017-03-13 152 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
3fc03069b Changbin Du 2017-03-13 153 shadow_ctx_notifier_block[req->engine->id]);
3fc03069b Changbin Du 2017-03-13 154 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
0e86cc9cc Changbin Du 2017-05-04 155 enum intel_engine_id ring_id = req->engine->id;
0e86cc9cc Changbin Du 2017-05-04 156 struct intel_vgpu_workload *workload;
679fd3eba Changbin Du 2017-11-13 157 unsigned long flags;
0e86cc9cc Changbin Du 2017-05-04 158
0e86cc9cc Changbin Du 2017-05-04 159 if (!is_gvt_request(req)) {
679fd3eba Changbin Du 2017-11-13 160 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
0e86cc9cc Changbin Du 2017-05-04 161 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
0e86cc9cc Changbin Du 2017-05-04 162 scheduler->engine_owner[ring_id]) {
0e86cc9cc Changbin Du 2017-05-04 163 /* Switch ring from vGPU to host. */
0e86cc9cc Changbin Du 2017-05-04 164 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
0e86cc9cc Changbin Du 2017-05-04 165 NULL, ring_id);
0e86cc9cc Changbin Du 2017-05-04 166 scheduler->engine_owner[ring_id] = NULL;
0e86cc9cc Changbin Du 2017-05-04 167 }
679fd3eba Changbin Du 2017-11-13 168 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
0e86cc9cc Changbin Du 2017-05-04 169
0e86cc9cc Changbin Du 2017-05-04 170 return NOTIFY_OK;
0e86cc9cc Changbin Du 2017-05-04 171 }
e47340578 Zhi Wang 2016-05-01 172
0e86cc9cc Changbin Du 2017-05-04 173 workload = scheduler->current_workload[ring_id];
0e86cc9cc Changbin Du 2017-05-04 174 if (unlikely(!workload))
9272f73f7 Chuanxiao Dong 2017-02-17 175 return NOTIFY_OK;
9272f73f7 Chuanxiao Dong 2017-02-17 176
e47340578 Zhi Wang 2016-05-01 177 switch (action) {
e47340578 Zhi Wang 2016-05-01 178 case INTEL_CONTEXT_SCHEDULE_IN:
679fd3eba Changbin Du 2017-11-13 179 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
0e86cc9cc Changbin Du 2017-05-04 180 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
0e86cc9cc Changbin Du 2017-05-04 181 /* Switch ring from host to vGPU or vGPU to vGPU. */
0e86cc9cc Changbin Du 2017-05-04 182 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
0e86cc9cc Changbin Du 2017-05-04 183 workload->vgpu, ring_id);
0e86cc9cc Changbin Du 2017-05-04 184 scheduler->engine_owner[ring_id] = workload->vgpu;
0e86cc9cc Changbin Du 2017-05-04 185 } else
0e86cc9cc Changbin Du 2017-05-04 186 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
0e86cc9cc Changbin Du 2017-05-04 187 ring_id, workload->vgpu->id);
679fd3eba Changbin Du 2017-11-13 188 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
e47340578 Zhi Wang 2016-05-01 189 atomic_set(&workload->shadow_ctx_active, 1);
e47340578 Zhi Wang 2016-05-01 190 break;
e47340578 Zhi Wang 2016-05-01 191 case INTEL_CONTEXT_SCHEDULE_OUT:
295764cd2 Xiong Zhang 2017-11-07 192 save_ring_hw_state(workload->vgpu, ring_id);
e47340578 Zhi Wang 2016-05-01 193 atomic_set(&workload->shadow_ctx_active, 0);
e47340578 Zhi Wang 2016-05-01 194 break;
da5f99eac Zhenyu Wang 2017-12-01 195 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
da5f99eac Zhenyu Wang 2017-12-01 196 save_ring_hw_state(workload->vgpu, ring_id);
da5f99eac Zhenyu Wang 2017-12-01 197 break;
e47340578 Zhi Wang 2016-05-01 198 default:
e47340578 Zhi Wang 2016-05-01 199 WARN_ON(1);
e47340578 Zhi Wang 2016-05-01 200 return NOTIFY_OK;
e47340578 Zhi Wang 2016-05-01 201 }
e47340578 Zhi Wang 2016-05-01 202 wake_up(&workload->shadow_ctx_status_wq);
e47340578 Zhi Wang 2016-05-01 203 return NOTIFY_OK;
e47340578 Zhi Wang 2016-05-01 204 }
e47340578 Zhi Wang 2016-05-01 205
9dfb8e5b9 Kechen Lu 2017-08-10 206 static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
9dfb8e5b9 Kechen Lu 2017-08-10 207 struct intel_engine_cs *engine)
9dfb8e5b9 Kechen Lu 2017-08-10 208 {
9dfb8e5b9 Kechen Lu 2017-08-10 209 struct intel_context *ce = &ctx->engine[engine->id];
9dfb8e5b9 Kechen Lu 2017-08-10 210 u64 desc = 0;
9dfb8e5b9 Kechen Lu 2017-08-10 211
9dfb8e5b9 Kechen Lu 2017-08-10 212 desc = ce->lrc_desc;
9dfb8e5b9 Kechen Lu 2017-08-10 213
9dfb8e5b9 Kechen Lu 2017-08-10 214 /* Update bits 0-11 of the context descriptor which includes flags
9dfb8e5b9 Kechen Lu 2017-08-10 215 * like GEN8_CTX_* cached in desc_template
9dfb8e5b9 Kechen Lu 2017-08-10 216 */
9dfb8e5b9 Kechen Lu 2017-08-10 217 desc &= U64_MAX << 12;
9dfb8e5b9 Kechen Lu 2017-08-10 218 desc |= ctx->desc_template & ((1ULL << 12) - 1);
9dfb8e5b9 Kechen Lu 2017-08-10 219
9dfb8e5b9 Kechen Lu 2017-08-10 220 ce->lrc_desc = desc;
9dfb8e5b9 Kechen Lu 2017-08-10 221 }
9dfb8e5b9 Kechen Lu 2017-08-10 222
0a53bc07f fred gao 2017-08-18 223 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
0a53bc07f fred gao 2017-08-18 224 {
0a53bc07f fred gao 2017-08-18 225 struct intel_vgpu *vgpu = workload->vgpu;
0a53bc07f fred gao 2017-08-18 226 void *shadow_ring_buffer_va;
0a53bc07f fred gao 2017-08-18 227 u32 *cs;
47d4337e9 Weinan Li 2018-02-13 228 struct drm_i915_gem_request *req = workload->req;
47d4337e9 Weinan Li 2018-02-13 229
47d4337e9 Weinan Li 2018-02-13 @230 if (IS_KABYLAKE(req->i915) &&
47d4337e9 Weinan Li 2018-02-13 231 is_inhibit_context(req->ctx, req->engine->id))
47d4337e9 Weinan Li 2018-02-13 232 intel_vgpu_restore_inhibit_context(vgpu, req);
0a53bc07f fred gao 2017-08-18 233
0a53bc07f fred gao 2017-08-18 234 /* allocate shadow ring buffer */
0a53bc07f fred gao 2017-08-18 235 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
0a53bc07f fred gao 2017-08-18 236 if (IS_ERR(cs)) {
0a53bc07f fred gao 2017-08-18 237 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
0a53bc07f fred gao 2017-08-18 238 workload->rb_len);
0a53bc07f fred gao 2017-08-18 239 return PTR_ERR(cs);
0a53bc07f fred gao 2017-08-18 240 }
0a53bc07f fred gao 2017-08-18 241
0a53bc07f fred gao 2017-08-18 242 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
0a53bc07f fred gao 2017-08-18 243
0a53bc07f fred gao 2017-08-18 244 /* get shadow ring buffer va */
0a53bc07f fred gao 2017-08-18 245 workload->shadow_ring_buffer_va = cs;
0a53bc07f fred gao 2017-08-18 246
0a53bc07f fred gao 2017-08-18 247 memcpy(cs, shadow_ring_buffer_va,
0a53bc07f fred gao 2017-08-18 248 workload->rb_len);
0a53bc07f fred gao 2017-08-18 249
0a53bc07f fred gao 2017-08-18 250 cs += workload->rb_len / sizeof(u32);
0a53bc07f fred gao 2017-08-18 251 intel_ring_advance(workload->req, cs);
0a53bc07f fred gao 2017-08-18 252
0a53bc07f fred gao 2017-08-18 253 return 0;
0a53bc07f fred gao 2017-08-18 254 }
0a53bc07f fred gao 2017-08-18 255
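
Note the timing: the hunk from 47d4337e9 (2018-02-13) predates the request rename (2018-02-21), so the merge reintroduces a struct tag the rest of the file no longer knows. A self-contained illustration with hypothetical types (expected to fail compilation, reproducing the same pair of diagnostics seen above):

	/* illustration.c: gcc -Werror=incompatible-pointer-types rejects this
	 * with the same two diagnostics as the build log above. */
	struct i915_request { void *i915; };	/* the renamed, fully defined type */
	struct drm_i915_gem_request;		/* old tag: declared, never defined */

	struct workload { struct i915_request *req; };

	void *lookup(struct workload *w)
	{
		/* error: initialization from incompatible pointer type */
		struct drm_i915_gem_request *req = w->req;

		/* error: dereferencing pointer to incomplete type */
		return req->i915;
	}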
:::::: The code at line 230 was first introduced by commit
:::::: 47d4337e914f3427477cd8154708f3b06544afd6 drm/i915/gvt: init mmio by lri command in vgpu inhibit context
:::::: TO: Weinan Li <weinan.z.li@intel.com>
:::::: CC: Zhenyu Wang <zhenyuw@linux.intel.com>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
Attachment: .config.gz (application/gzip, 32683 bytes)
URL: <https://lists.freedesktop.org/archives/intel-gvt-dev/attachments/20180222/a4a3ed33/attachment-0001.gz>