[Intel-gfx] [PATCH 19/37] drm/i915: Test coherency of and barriers between cache domains
Matthew Auld
matthew.william.auld at gmail.com
Fri Jan 13 11:44:23 UTC 2017
On 11 January 2017 at 21:09, Chris Wilson <chris at chris-wilson.co.uk> wrote:
> Write into an object using WB, WC, GTT, and GPU paths and make sure that
> our internal API is sufficient to ensure coherent reads and writes.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_gem.c | 1 +
> .../gpu/drm/i915/selftests/i915_gem_coherency.c | 355 +++++++++++++++++++++
> .../gpu/drm/i915/selftests/i915_live_selftests.h | 1 +
> 3 files changed, 357 insertions(+)
> create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4a52c5872898..242d894b356e 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4899,4 +4899,5 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
> #include "selftests/mock_gem_device.c"
> #include "selftests/huge_gem_object.c"
> #include "selftests/i915_gem_object.c"
> +#include "selftests/i915_gem_coherency.c"
> #endif
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
> new file mode 100644
> index 000000000000..3e57b7a3c73f
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
> @@ -0,0 +1,355 @@
> +/*
> + * Copyright © 2017 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + */
> +
> +#include <linux/prime_numbers.h>
> +
> +#include "i915_selftest.h"
> +#include "i915_random.h"
> +
> +static int cpu_set(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 v)
> +{
> + unsigned int needs_clflush;
> + struct page *page;
> + typeof(v) *map;
Are you expecting the typeof(v) to change, so there's less churn?
> + int err;
> +
> + err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
> + if (err)
> + return err;
> +
> + page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
> + map = kmap_atomic(page);
> + if (needs_clflush & CLFLUSH_BEFORE)
> + clflush(map+offset_in_page(offset) / sizeof(*map));
> + map[offset_in_page(offset) / sizeof(*map)] = v;
> + if (needs_clflush & CLFLUSH_AFTER)
> + clflush(map+offset_in_page(offset) / sizeof(*map));
> + kunmap_atomic(map);
> +
> + i915_gem_obj_finish_shmem_access(obj);
> + return 0;
> +}
> +
> +static int cpu_get(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 *v)
> +{
> + unsigned int needs_clflush;
> + struct page *page;
> + typeof(v) map;
> + int err;
> +
> + err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
> + if (err)
> + return err;
> +
> + page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
> + map = kmap_atomic(page);
> + if (needs_clflush & CLFLUSH_BEFORE)
> + clflush(map+offset_in_page(offset) / sizeof(*map));
> + *v = map[offset_in_page(offset) / sizeof(*map)];
> + kunmap_atomic(map);
> +
> + i915_gem_obj_finish_shmem_access(obj);
> + return 0;
> +}
> +
> +static int gtt_set(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 v)
> +{
> + struct i915_vma *vma;
> + typeof(v) *map;
> + int err;
> +
> + err = i915_gem_object_set_to_gtt_domain(obj, true);
> + if (err)
> + return err;
> +
> + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
> + if (IS_ERR(vma))
> + return PTR_ERR(vma);
> +
> + map = i915_vma_pin_iomap(vma);
> + i915_vma_unpin(vma);
> + if (IS_ERR(map))
> + return PTR_ERR(map);
> +
> + map[offset / sizeof(*map)] = v;
> + i915_vma_unpin_iomap(vma);
> +
> + return 0;
> +}
> +
> +static int gtt_get(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 *v)
> +{
> + struct i915_vma *vma;
> + typeof(v) map;
> + int err;
> +
> + err = i915_gem_object_set_to_gtt_domain(obj, false);
> + if (err)
> + return err;
> +
> + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
> + if (IS_ERR(vma))
> + return PTR_ERR(vma);
> +
> + map = i915_vma_pin_iomap(vma);
> + i915_vma_unpin(vma);
> + if (IS_ERR(map))
> + return PTR_ERR(map);
> +
> + *v = map[offset / sizeof(*map)];
> + i915_vma_unpin_iomap(vma);
> +
> + return 0;
> +}
> +
> +static int wc_set(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 v)
> +{
> + typeof(v) *map;
> + int err;
> +
> + /* XXX GTT write followed by WC write go missing */
> + i915_gem_object_flush_gtt_write_domain(obj);
> +
> + err = i915_gem_object_set_to_gtt_domain(obj, true);
> + if (err)
> + return err;
> +
> + map = i915_gem_object_pin_map(obj, I915_MAP_WC);
> + if (IS_ERR(map))
> + return PTR_ERR(map);
> +
> + map[offset / sizeof(*map)] = v;
> + i915_gem_object_unpin_map(obj);
> +
> + return 0;
> +}
> +
> +static int wc_get(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 *v)
> +{
> + typeof(v) map;
> + int err;
> +
> + /* XXX WC write followed by GTT write go missing */
> + i915_gem_object_flush_gtt_write_domain(obj);
> +
> + err = i915_gem_object_set_to_gtt_domain(obj, false);
> + if (err)
> + return err;
> +
> + map = i915_gem_object_pin_map(obj, I915_MAP_WC);
> + if (IS_ERR(map))
> + return PTR_ERR(map);
> +
> + *v = map[offset / sizeof(*map)];
> + i915_gem_object_unpin_map(obj);
> +
> + return 0;
> +}
> +
> +static int gpu_set(struct drm_i915_gem_object *obj,
> + unsigned long offset,
> + u32 v)
> +{
> + struct drm_i915_private *i915 = to_i915(obj->base.dev);
> + struct drm_i915_gem_request *rq;
> + struct i915_vma *vma;
> + int err;
> +
> + err = i915_gem_object_set_to_gtt_domain(obj, true);
> + if (err)
> + return err;
> +
> + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
> + if (IS_ERR(vma))
> + return PTR_ERR(vma);
> +
> + rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
> + if (IS_ERR(rq)) {
> + i915_vma_unpin(vma);
> + return PTR_ERR(rq);
> + }
> +
> + err = intel_ring_begin(rq, 4);
> + if (err) {
> + __i915_add_request(rq, false);
> + i915_vma_unpin(vma);
> + return err;
> + }
> +
> + if (INTEL_GEN(i915) >= 8) {
> + intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
> + intel_ring_emit(rq->ring, lower_32_bits(i915_ggtt_offset(vma) + offset));
> + intel_ring_emit(rq->ring, upper_32_bits(i915_ggtt_offset(vma) + offset));
> + intel_ring_emit(rq->ring, v);
> + } else if (INTEL_GEN(i915) >= 4) {
> + intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
> + intel_ring_emit(rq->ring, 0);
> + intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
> + intel_ring_emit(rq->ring, v);
> + } else {
> + intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM | 1 << 22);
> + intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
> + intel_ring_emit(rq->ring, v);
> + intel_ring_emit(rq->ring, MI_NOOP);
> + }
> + intel_ring_advance(rq->ring);
> +
> + i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> + i915_vma_unpin(vma);
> +
> + ww_mutex_lock(&obj->resv->lock, NULL);
> + reservation_object_add_excl_fence(obj->resv, &rq->fence);
> + ww_mutex_unlock(&obj->resv->lock);
> +
> + __i915_add_request(rq, true);
> +
> + return 0;
> +}
> +
> +static const struct igt_coherency_mode {
> + const char *name;
> + int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
> + int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
> +} igt_coherency_mode[] = {
> + { "cpu", cpu_set, cpu_get },
> + { "gtt", gtt_set, gtt_get },
> + { "wc", wc_set, wc_get },
> + { "gpu", gpu_set, NULL },
> + { },
> +};
> +
> +static int igt_gem_coherency(void *arg)
> +{
> + const unsigned int ncachelines = PAGE_SIZE/64;
> + I915_RND_STATE(prng);
> + struct drm_i915_private *i915 = arg;
> + const struct igt_coherency_mode *read, *write, *over;
> + struct drm_i915_gem_object *obj;
> + unsigned long count, n;
> + u32 *offsets, *values;
> + int err;
> +
> + offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
> + if (!offsets)
> + return -ENOMEM;
> + for (count = 0; count < ncachelines; count++)
> + offsets[count] = count * 64 + 4 * (count % 16);
> +
> + values = offsets + ncachelines;
> +
> + mutex_lock(&i915->drm.struct_mutex);
> + for (over = igt_coherency_mode; over->name; over++) {
> + if (!over->set)
> + continue;
> +
> + for (write = igt_coherency_mode; write->name; write++) {
> + if (!write->set)
> + continue;
> +
> + for (read = igt_coherency_mode; read->name; read++) {
> + if (!read->get)
> + continue;
> +
> + for_each_prime_number_from(count, 1, ncachelines) {
> + obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
> + if (IS_ERR(obj))
This looks a little nasty: err may be used uninitialised here, and even worse it
looks like the subsequent if (obj) check will pass even when obj is an error pointer.
Otherwise:
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
More information about the Intel-gfx
mailing list