[Intel-gfx] [PATCH] Wait accounting [ver. 3]
Chris Wilson
chris at chris-wilson.co.uk
Mon May 25 11:54:58 CEST 2009
On Fri, 2009-05-22 at 12:58 -0400, Ben Gamari wrote:
> Here is the latest (and hopefully close to final) version of my wait accounting
> patch. The patch adds a debugfs file where one can find the total time and
> number of waits in the batchbuffer submission path. It is hoped that this will
make finding stalls easier. This time it's actually compile-tested (previously
I was compiling without the necessary macro defined), with numerous fixes and
cleanups from the previous version. Let me know what you think.
Nice work! I've made several amendments, principally to pass a token to
i915_wait_request(). The main advantage of this is that we only
accumulate if we actually need to wait. Couple that with tracking more
sources (admittedly some are^W^Wshould be impossible!) and it becomes
quite useful.
(The patch is against my local tree, but the alterations are simple
enough.)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e27f043..3ca2589 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -129,10 +129,20 @@ struct drm_i915_fence_reg {
};
enum drm_i915_wait_srcs {
- I915_WAITSRC_WAIT_FOR_ACTIVE_OBJ,
- I915_WAITSRC_WAIT_ON_RENDER_FOR_OBJ,
- I915_WAITSRC_FLUSH_FOR_OBJ,
- I915_WAITSRC_FLUSH_FOR_FENCE,
+ I915_WAITSRC_RELOCATE,
+ I915_WAITSRC_EVICT,
+ I915_WAITSRC_EVICT_WAIT,
+ I915_WAITSRC_EVICT_FLUSH,
+ I915_WAITSRC_PWRITE,
+ I915_WAITSRC_PREAD,
+ I915_WAITSRC_CHANGE_DOMAIN,
+ I915_WAITSRC_FREE,
+ I915_WAITSRC_GET_FENCE,
+ I915_WAITSRC_PUT_FENCE,
+ I915_WAITSRC_CHANGE_TILING,
+ I915_WAITSRC_THROTTLE,
+ I915_WAITSRC_LEAVEVT,
+ I915_WAITSRC_FBO,
I915_WAITSRC_LAST
};
@@ -140,14 +150,17 @@ enum drm_i915_wait_srcs {
#ifdef I915_WAIT_ACCOUNTING
#define MIGHT_WAIT() struct timeval _wait_ts_begin, _wait_ts_end;
#define BEGIN_WAIT() do_gettimeofday(&_wait_ts_begin);
-#define END_WAIT(src) { \
+#define END_WAIT(priv, src) do { \
+ drm_i915_private_t *dev_priv__ = (priv); \
do_gettimeofday(&_wait_ts_end); \
- dev_priv->mm.wait_time[src] += _wait_ts_end.tv_usec - _wait_ts_begin.tv_usec; \
- dev_priv->mm.wait_count[src]++; }
+ dev_priv__->mm.wait_time[src] += _wait_ts_end.tv_usec - _wait_ts_begin.tv_usec; \
+ dev_priv__->mm.wait_time[src] += 1000000*(_wait_ts_end.tv_sec - _wait_ts_begin.tv_sec); \
+ dev_priv__->mm.wait_count[src]++; \
+}while(0)
#else
#define MIGHT_WAIT() {}
#define BEGIN_WAIT() {}
-#define END_WAIT(src) {}
+#define END_WAIT(priv, src) {}
#endif
typedef struct drm_i915_private {
@@ -658,7 +671,7 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
+int i915_gem_object_unbind(struct drm_gem_object *obj, int wait_reason);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
@@ -675,7 +688,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
int i915_gem_idle(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
+ int write,
+ int reason);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b491f9..d7a5b21 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,12 +38,15 @@ static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
+ int write,
+ int reason);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
- uint64_t size);
+ uint64_t size,
+ int reason);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ int wait_reason);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
@@ -276,7 +279,8 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
+ args->size,
+ I915_WAITSRC_PREAD);
if (ret != 0)
goto fail_put_pages;
@@ -371,7 +375,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
+ args->size,
+ I915_WAITSRC_PREAD);
if (ret != 0)
goto fail_put_pages;
@@ -573,7 +578,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1, I915_WAITSRC_PWRITE);
if (ret)
goto fail;
@@ -667,7 +672,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto out_unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1, I915_WAITSRC_PWRITE);
if (ret)
goto out_unpin_object;
@@ -749,7 +754,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_unlock;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1, I915_WAITSRC_PWRITE);
if (ret != 0)
goto fail_put_pages;
@@ -845,7 +850,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_unlock;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1, I915_WAITSRC_PWRITE);
if (ret != 0)
goto fail_put_pages;
@@ -1011,7 +1016,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ ret = i915_gem_object_set_to_gtt_domain(obj,
+ write_domain != 0,
+ I915_WAITSRC_CHANGE_DOMAIN);
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
@@ -1020,7 +1027,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret == -EINVAL)
ret = 0;
} else {
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ ret = i915_gem_object_set_to_cpu_domain(obj,
+ write_domain != 0,
+ I915_WAITSRC_CHANGE_DOMAIN);
}
drm_gem_object_unreference(obj);
@@ -1688,7 +1697,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* request and object lists appropriately for that event.
*/
static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno, int reason)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1697,6 +1706,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ MIGHT_WAIT();
+
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
@@ -1705,6 +1716,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_driver_irq_postinstall(dev);
}
+ BEGIN_WAIT();
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
@@ -1713,6 +1726,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
dev_priv->mm.wedged);
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ if (ret == 0)
+ END_WAIT(dev_priv, reason);
}
if (dev_priv->mm.wedged)
ret = -EIO;
@@ -1808,7 +1824,7 @@ i915_gem_flush(struct drm_device *dev,
* safe to unbind from the GTT or access from the CPU.
*/
static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj, int wait_reason)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1827,7 +1843,9 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ ret = i915_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ wait_reason);
if (ret != 0)
return ret;
}
@@ -1839,7 +1857,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_gem_object *obj, int wait_reason)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1864,7 +1882,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1, wait_reason);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("set_domain failed: %d\n", ret);
@@ -1874,7 +1892,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
i915_gem_object_flush_gpu_write_domain(obj);
i915_gem_object_flush_gtt_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, wait_reason);
if (ret != 0)
return ret;
obj->read_domains &= ~I915_GEM_GPU_DOMAINS;
@@ -1920,8 +1938,7 @@ i915_gem_evict_something(struct drm_device *dev)
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret = 0;
- int wait_reason = I915_WAITSRC_WAIT_FOR_ACTIVE_OBJ;
- MIGHT_WAIT();
+ int wait_reason = I915_WAITSRC_EVICT_WAIT;
for (;;) {
/* If there's an inactive buffer available now, grab it
@@ -1939,10 +1956,8 @@ i915_gem_evict_something(struct drm_device *dev)
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
- BEGIN_WAIT();
- ret = i915_gem_object_unbind(obj);
- if (ret)
- END_WAIT(I915_WAITSRC_WAIT_ON_RENDER_FOR_OBJ);
+ ret = i915_gem_object_unbind(obj,
+ I915_WAITSRC_EVICT);
break;
}
@@ -1957,11 +1972,11 @@ i915_gem_evict_something(struct drm_device *dev)
struct drm_i915_gem_request,
list);
- BEGIN_WAIT();
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev,
+ request->seqno,
+ wait_reason);
if (ret)
break;
- END_WAIT(wait_reason);
/* if waiting caused an object to become inactive,
* then loop around and wait for it. Otherwise, we
@@ -1983,8 +1998,8 @@ i915_gem_evict_something(struct drm_device *dev)
struct drm_i915_gem_object,
list);
obj = obj_priv->obj;
-
- wait_reason = I915_WAITSRC_FLUSH_FOR_OBJ;
+
+ wait_reason = I915_WAITSRC_EVICT_FLUSH;
i915_gem_flush(dev,
obj->write_domain,
obj->write_domain);
@@ -2253,14 +2268,9 @@ try_again:
* objects to finish before trying again.
*/
if (i == dev_priv->num_fence_regs) {
- MIGHT_WAIT();
-
if (seqno == dev_priv->mm.next_gem_seqno) {
i915_gem_flush(dev,
@@ -2272,11 +2282,10 @@ try_again:
return -ENOMEM;
}
- BEGIN_WAIT();
- ret = i915_wait_request(dev, seqno);
+ ret = i915_wait_request(dev, seqno,
+ I915_WAITSRC_GET_FENCE);
if (ret)
return ret;
- END_WAIT(I915_WAITSRC_FLUSH_FOR_FENCE);
goto try_again;
}
@@ -2381,7 +2390,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
i915_gem_object_flush_gpu_write_domain(obj);
i915_gem_object_flush_gtt_write_domain(obj);
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj,
+ I915_WAITSRC_PUT_FENCE);
if (ret != 0)
return ret;
}
@@ -2557,7 +2567,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write,
+ int reason)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
@@ -2568,7 +2580,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, reason);
if (ret != 0)
return ret;
@@ -2600,13 +2612,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* flushes to occur.
*/
static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write,
+ int reason)
{
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, reason);
if (ret != 0)
return ret;
@@ -2870,17 +2884,18 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
*/
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size)
+ uint64_t offset, uint64_t size,
+ int reason)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
if (offset == 0 && size == obj->size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
+ return i915_gem_object_set_to_cpu_domain(obj, 0, reason);
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, reason);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -3048,7 +3063,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1,
+ I915_WAITSRC_RELOCATE);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
@@ -3161,7 +3177,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
i915_file_priv->mm.last_gem_throttle_seqno =
i915_file_priv->mm.last_gem_seqno;
if (seqno)
- ret = i915_wait_request(dev, seqno);
+ ret = i915_wait_request(dev, seqno, I915_WAITSRC_THROTTLE);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -3818,7 +3834,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->phys_obj)
i915_gem_detach_phys_object(dev, obj);
- i915_gem_object_unbind(obj);
+ i915_gem_object_unbind(obj, I915_WAITSRC_FREE);
i915_gem_free_mmap_offset(obj);
@@ -3847,7 +3863,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
return -EINVAL;
}
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, I915_WAITSRC_LEAVEVT);
if (ret != 0) {
DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 8d42deb..a942ab8 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -324,18 +324,29 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
}
#ifdef I915_WAIT_ACCOUNTING
-static int i915_wait_source_info(struct seq_file *m, void *data) {
+static int i915_wait_source_info(struct seq_file *m, void *data)
+{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
-#define WAITSRC(SRC) seq_printf(m, "%s\t%ld\t%ld\n", #SRC, \
+#define WAITSRC(SRC) seq_printf(m, "%15s %12ld %6ld\n", #SRC, \
dev_priv->mm.wait_time[I915_WAITSRC_##SRC], \
dev_priv->mm.wait_count[I915_WAITSRC_##SRC])
- WAITSRC(WAIT_FOR_ACTIVE_OBJ);
- WAITSRC(WAIT_ON_RENDER_FOR_OBJ);
- WAITSRC(FLUSH_FOR_OBJ);
- WAITSRC(FLUSH_FOR_FENCE);
+ WAITSRC(RELOCATE);
+ WAITSRC(EVICT);
+ WAITSRC(EVICT_WAIT);
+ WAITSRC(EVICT_FLUSH);
+ WAITSRC(PWRITE);
+ WAITSRC(PREAD);
+ WAITSRC(CHANGE_DOMAIN);
+ WAITSRC(FREE);
+ WAITSRC(GET_FENCE);
+ WAITSRC(PUT_FENCE);
+ WAITSRC(CHANGE_TILING);
+ WAITSRC(THROTTLE);
+ WAITSRC(LEAVEVT);
+ WAITSRC(FBO);
#undef WAITSRC
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 39ae02a..b852825 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -556,7 +556,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* need to ensure that any fence register is cleared.
*/
if (!i915_gem_object_tiling_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj,
+ I915_WAITSRC_CHANGE_TILING);
else
ret = i915_gem_object_put_fence_reg(obj);
if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fe96c13..4b808e5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -704,7 +704,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1,
+ I915_WAITSRC_FBO);
if (ret != 0) {
i915_gem_object_unpin(intel_fb->obj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e4652dc..1e1aeb0 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -468,7 +468,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
}
/* Flush everything out, we'll be doing GTT only from now on */
- i915_gem_object_set_to_gtt_domain(fbo, 1);
+ i915_gem_object_set_to_gtt_domain(fbo, 1, I915_WAITSRC_FBO);
ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
if (ret) {
--
1.6.3.1
More information about the Intel-gfx
mailing list