[Intel-gfx] [RFC PATCH v2 1/2] drm/i915: Export currently required functions for GVT
Zhenyu Wang
zhenyuw@linux.intel.com
Mon Nov 26 06:05:53 UTC 2018
This tries to export all i915 functions currently required by GVT.
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
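For context, a quick sketch of what these exports enable (illustrative
only, not part of the patch): with the _GPL exports in place, a
GPL-licensed GVT module built outside i915 can resolve these symbols at
load time. The function name and signature below are taken from the
i915_gem.c hunk in this patch; the module itself, including the
gvt_alloc_object() helper, is hypothetical.

/* Hypothetical out-of-tree module using a symbol exported above. */
#include <linux/module.h>
#include <linux/sizes.h>

#include "i915_drv.h"	/* assumes the i915 headers are on the include path */

static struct drm_i915_gem_object *
gvt_alloc_object(struct drm_i915_private *dev_priv)
{
	/*
	 * Resolved via EXPORT_SYMBOL_GPL(i915_gem_object_create);
	 * without the export, module load fails with "Unknown symbol".
	 */
	return i915_gem_object_create(dev_priv, SZ_4K);
}

MODULE_LICENSE("GPL");	/* required, since the exports are _GPL */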
 drivers/gpu/drm/i915/i915_gem.c           | 11 +++++++++++
 drivers/gpu/drm/i915/i915_gem_context.c   |  2 ++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c    |  1 +
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |  2 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c       |  1 +
 drivers/gpu/drm/i915/i915_request.c       |  3 +++
 drivers/gpu/drm/i915/i915_vma.c           |  2 ++
 drivers/gpu/drm/i915/intel_ringbuffer.c   |  1 +
 drivers/gpu/drm/i915/intel_runtime_pm.c   |  2 ++
 drivers/gpu/drm/i915/intel_uncore.c       |  3 +++
 10 files changed, 28 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f75c980..9af6e9810f85 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -701,6 +701,7 @@ void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(i915_gem_object_alloc);
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
@@ -1029,6 +1030,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
i915_gem_object_unpin_pages(obj);
return ret;
}
+EXPORT_SYMBOL_GPL(i915_gem_obj_prepare_shmem_write);
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
@@ -2764,6 +2766,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list_add(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
}
+EXPORT_SYMBOL_GPL(__i915_gem_object_set_pages);
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
@@ -2930,6 +2933,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = ERR_PTR(ret);
goto out_unlock;
}
+EXPORT_SYMBOL_GPL(i915_gem_object_pin_map);
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
@@ -4041,6 +4045,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_unpin_pages(obj);
return 0;
}
+EXPORT_SYMBOL_GPL(i915_gem_object_set_to_gtt_domain);
/**
* Changes the cache-level of an object across all VMA.
@@ -4406,6 +4411,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
+EXPORT_SYMBOL_GPL(i915_gem_object_set_to_cpu_domain);
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
@@ -4535,6 +4541,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
+EXPORT_SYMBOL_GPL(i915_gem_object_ggtt_pin);
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
@@ -4758,6 +4765,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
+EXPORT_SYMBOL_GPL(i915_gem_object_init);
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
@@ -4864,6 +4872,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
i915_gem_object_free(obj);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(i915_gem_object_create);
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
@@ -5061,6 +5070,7 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
else
i915_gem_object_put(obj);
}
+EXPORT_SYMBOL_GPL(__i915_gem_object_release_unless_active);
void i915_gem_sanitize(struct drm_i915_private *i915)
{
@@ -6168,6 +6178,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
sg = i915_gem_object_get_sg(obj, n, &offset);
return nth_page(sg_page(sg), offset);
}
+EXPORT_SYMBOL_GPL(i915_gem_object_get_page);
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b97963db0287..212b8d176d25 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -274,6 +274,7 @@ void i915_gem_context_release(struct kref *ref)
if (llist_add(&ctx->free_link, &i915->contexts.free_list))
queue_work(i915->wq, &i915->contexts.free_work);
}
+EXPORT_SYMBOL_GPL(i915_gem_context_release);
static void context_close(struct i915_gem_context *ctx)
{
@@ -473,6 +474,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return ctx;
}
+EXPORT_SYMBOL_GPL(i915_gem_context_create_gvt);
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82e2ca17a441..ac98b094220c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -244,6 +244,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return drm_gem_dmabuf_export(dev, &exp_info);
}
+EXPORT_SYMBOL_GPL(i915_gem_prime_export);
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d548ac05ccd7..21b79c1f61d0 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -420,6 +420,7 @@ i915_reserve_fence(struct drm_i915_private *dev_priv)
list_del(&fence->link);
return fence;
}
+EXPORT_SYMBOL_GPL(i915_reserve_fence);
/**
* i915_unreserve_fence - Reclaim a reserved fence
@@ -433,6 +434,7 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
list_add(&fence->link, &fence->i915->mm.fence_list);
}
+EXPORT_SYMBOL_GPL(i915_unreserve_fence);
/**
* i915_gem_revoke_fences - revoke fence state
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..75dd6ebf517e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -4040,6 +4040,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
size, alignment, color,
start, end, DRM_MM_INSERT_EVICT);
}
+EXPORT_SYMBOL_GPL(i915_gem_gtt_insert);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 71107540581d..0048fd9bb899 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -749,6 +749,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
intel_context_unpin(ce);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(i915_request_alloc);
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
@@ -1073,6 +1074,7 @@ void i915_request_add(struct i915_request *request)
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
}
+EXPORT_SYMBOL_GPL(i915_request_add);
static unsigned long local_clock_us(unsigned int *cpu)
{
@@ -1339,6 +1341,7 @@ long i915_request_wait(struct i915_request *rq,
return timeout;
}
+EXPORT_SYMBOL_GPL(i915_request_wait);
static void ring_retire_requests(struct intel_ring *ring)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5b4d78cdb4ca..704f18a8ecd3 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -783,6 +783,7 @@ void i915_vma_close(struct i915_vma *vma)
*/
list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}
+EXPORT_SYMBOL_GPL(i915_vma_close);
void i915_vma_reopen(struct i915_vma *vma)
{
@@ -1028,6 +1029,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
export_fence(vma, rq, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(i915_vma_move_to_active);
int i915_vma_unbind(struct i915_vma *vma)
{
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 87eebc13c0d8..c0d8693f1110 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1957,6 +1957,7 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
return cs;
}
+EXPORT_SYMBOL_GPL(intel_ring_begin);
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c2de9b69a19..90a15e8d2afa 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -4004,6 +4004,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
}
+EXPORT_SYMBOL_GPL(intel_runtime_pm_get);
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
@@ -4087,6 +4088,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
+EXPORT_SYMBOL_GPL(intel_runtime_pm_put);
/**
* intel_runtime_pm_enable - enable runtime pm
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9289515108c3..35293ac7dff9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -644,6 +644,7 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
__intel_uncore_forcewake_get(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+EXPORT_SYMBOL_GPL(intel_uncore_forcewake_get);
/**
* intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
@@ -756,6 +757,7 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
__intel_uncore_forcewake_put(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+EXPORT_SYMBOL_GPL(intel_uncore_forcewake_put);
/**
* intel_uncore_forcewake_put__locked - grab forcewake domain references
@@ -2388,6 +2390,7 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
return fw_domains;
}
+EXPORT_SYMBOL_GPL(intel_uncore_forcewake_for_reg);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
--
2.19.1