[PATCH 06/13] drm/msm: get an iova from the address space instead of an id
Jordan Crouse
jcrouse at codeaurora.org
Mon May 8 20:35:02 UTC 2017
In the future we won't have a fixed set of address spaces.
Instead of going through the effort of assigning an ID for each
address space, just use the address space itself as a token for
getting / putting an iova.
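For example, a caller that previously passed a numeric id would now pass
the address space pointer directly (a minimal sketch of the intended call
pattern, using the names introduced in this series):

	uint64_t iova;
	int ret = msm_gem_get_iova(obj, gpu->aspace, &iova);

	if (!ret) {
		/* ... use iova ... */
		msm_gem_put_iova(obj, gpu->aspace);
	}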
This forces a few changes in the gem object, however: instead
of indexing into a fixed, per-id array of domains, we need to
maintain a list of them. Luckily the list will stay small; even
with dynamic address spaces we wouldn't ever see more than
two or three entries per object.
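A rough sketch of the per-object lookup this implies (simplified from the
actual patch below; the real struct also carries the drm_mm node):

	/* each mapping now records the address space it belongs to */
	struct msm_gem_vma {
		struct msm_gem_address_space *aspace;
		uint64_t iova;
		struct list_head list;
	};

	/* find the mapping for a given address space, if any */
	list_for_each_entry(domain, &msm_obj->domains, list)
		if (domain->aspace == aspace)
			return domain;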
Signed-off-by: Jordan Crouse <jcrouse at codeaurora.org>
---
drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 8 +-
drivers/gpu/drm/msm/adreno/a5xx_power.c | 5 +-
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 6 +-
drivers/gpu/drm/msm/dsi/dsi_host.c | 15 +++-
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 8 +-
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 16 ++--
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 4 -
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 13 +--
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 5 +-
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 11 +--
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 4 -
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 13 +--
drivers/gpu/drm/msm/msm_drv.c | 15 +---
drivers/gpu/drm/msm/msm_drv.h | 25 +++---
drivers/gpu/drm/msm/msm_fb.c | 15 ++--
drivers/gpu/drm/msm/msm_fbdev.c | 10 ++-
drivers/gpu/drm/msm/msm_gem.c | 134 +++++++++++++++++++++---------
drivers/gpu/drm/msm/msm_gem.h | 4 +-
drivers/gpu/drm/msm/msm_gem_submit.c | 4 +-
drivers/gpu/drm/msm/msm_gpu.c | 8 +-
drivers/gpu/drm/msm/msm_gpu.h | 1 -
drivers/gpu/drm/msm/msm_kms.h | 3 +
22 files changed, 193 insertions(+), 134 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 2d3af90..fd54cc7 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -306,7 +306,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
}
if (iova) {
- int ret = msm_gem_get_iova(bo, gpu->id, iova);
+ int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
if (ret) {
drm_gem_object_unreference_unlocked(bo);
@@ -693,19 +693,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index ed0802e..2fdee44 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -301,7 +301,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->gpmu_bo))
goto err;
- if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+ &a5xx_gpu->gpmu_iova))
goto err;
ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
@@ -330,7 +331,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
err:
if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo)
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 5b63fc6..868a969 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -409,7 +409,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -428,7 +428,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
if (adreno_gpu->memptrs_iova)
- msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+ msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4f79b10..4a58653 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -28,6 +28,7 @@
#include <linux/regmap.h>
#include <video/mipi_display.h>
+#include "msm_kms.h"
#include "dsi.h"
#include "dsi.xml.h"
#include "sfpb.xml.h"
@@ -975,6 +976,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
+ struct msm_drm_private *priv = dev->dev_private;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t iova;
@@ -991,7 +993,13 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
return ret;
}
- ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+ if (!priv->kms) {
+ pr_err("%s: No KMS is initialized\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
+ priv->kms->aspace, &iova);
mutex_unlock(&dev->struct_mutex);
if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret);
@@ -1023,9 +1031,12 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
struct drm_device *dev = msm_host->dev;
+ struct msm_drm_private *priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+ if (priv->kms)
+ msm_gem_put_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace);
mutex_lock(&dev->struct_mutex);
msm_gem_free_object(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index f29194a..ca45f47 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -127,7 +127,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->base.base.aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -372,7 +372,8 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo,
+ mdp4_kms->base.base.aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +430,8 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->base.base.aspace,
+ &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 6295204..34425b16 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -160,10 +160,11 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+ struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, aspace);
+
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@@ -536,7 +537,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->aspace = aspace;
+ kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
@@ -548,13 +549,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
aspace = NULL;
}
- mdp4_kms->id = msm_register_address_space(dev, aspace);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
@@ -571,7 +565,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 62712ca..0eacaf0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,9 +32,6 @@ struct mdp4_kms {
int rev;
- /* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *vdd;
@@ -43,7 +40,6 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 53619d0..c48984d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->base.base.aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->base.base.aspace);
}
@@ -161,6 +161,7 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp4_kms->base.base;
enum mdp4_pipe pipe = mdp4_plane->pipe;
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -172,13 +173,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 9217e0d..dfa3c14 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -161,7 +161,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->base.base.aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -758,7 +758,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->base.base.aspace,
+ &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index d3d6b4c..b3db53a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -163,7 +163,7 @@ static void mdp5_set_encoder_mode(struct msm_kms *kms,
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_gem_address_space *aspace = mdp5_kms->aspace;
+ struct msm_gem_address_space *aspace = kms->aspace;
int i;
for (i = 0; i < mdp5_kms->num_hwmixers; i++)
@@ -692,7 +692,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- mdp5_kms->aspace = aspace;
+ kms->aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
@@ -707,13 +707,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;;
}
- mdp5_kms->id = msm_register_address_space(dev, aspace);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 8bdb7ee..17caa0e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -55,10 +55,6 @@ struct mdp5_kms {
struct mdp5_state *state;
struct drm_modeset_lock state_lock;
- /* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
- struct msm_gem_address_space *aspace;
-
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe..4c0e1ae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -279,7 +279,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->base.base.aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -292,7 +292,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->base.base.aspace);
}
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
@@ -495,6 +495,7 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
enum mdp5_pipe pipe,
struct drm_framebuffer *fb)
{
+ struct msm_kms *kms = &mdp5_kms->base.base;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -504,13 +505,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, kms->aspace, 3));
}
/* Note: mdp5_plane->pipe_lock must be locked */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 257ee81..005ae06 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -51,19 +51,6 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
.atomic_state_free = msm_atomic_state_free,
};
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace)
-{
- struct msm_drm_private *priv = dev->dev_private;
-
- if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
- return -EINVAL;
-
- priv->aspace[priv->num_aspaces] = aspace;
-
- return priv->num_aspaces++;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -707,7 +694,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
- return msm_gem_get_iova(obj, priv->gpu->id, iova);
+ return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 28b6f9b..69e839c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -183,9 +183,6 @@ int msm_atomic_commit(struct drm_device *dev,
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace);
-
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
@@ -209,13 +206,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint64_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -250,9 +250,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 5cf165c..36505f3 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+ return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 951e40f..e071828 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -20,6 +20,7 @@
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include "msm_gem.h"
+#include "msm_kms.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
@@ -73,6 +74,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
{
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_device *dev = helper->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
@@ -124,7 +126,13 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+
+ if (!priv->kms) {
+ ret = -ENODEV;
+ goto fail_unlock;
+ }
+
+ ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7a82eaa..ffdc733 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -283,22 +283,57 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_vma *domain, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- if (!priv->aspace[id])
- continue;
- msm_gem_unmap_vma(priv->aspace[id],
- &msm_obj->domain[id], msm_obj->sgt);
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ msm_gem_unmap_vma(domain->aspace, domain, msm_obj->sgt);
+ obj_remove_domain(domain);
+ }
+}
+
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
}
+
+ return NULL;
}
/* should be called under struct_mutex.. although it can be called
@@ -308,49 +343,64 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint64_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
- msm_obj->sgt, obj->size >> PAGE_SHIFT);
- } else {
- msm_obj->domain[id].iova = physaddr(obj);
+ *iova = physaddr(obj);
+ return 0;
+ }
+
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
}
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ obj->size >> PAGE_SHIFT);
}
if (!ret)
- *iova = msm_obj->domain[id].iova;
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -358,14 +408,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -619,11 +673,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- unsigned id;
+ struct msm_gem_vma *domain;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -645,8 +698,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- for (id = 0; id < priv->num_aspaces; id++)
- seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
seq_printf(m, " %zu%s\n", obj->size, madv);
@@ -781,8 +835,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = &msm_obj->domain[0].node;
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
+ /* FIXME: Error here? */
+ if (domain)
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
@@ -795,6 +853,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
}
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 1b4cf20..2767014 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -37,7 +37,9 @@ struct msm_gem_address_space {
struct msm_gem_vma {
struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ struct list_head list;
};
struct msm_gem_object {
@@ -77,7 +79,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct msm_gem_vma domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c51cf03..4c2525e 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -158,7 +158,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -246,7 +246,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->gpu->aspace, &iova);
if (ret)
break;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38..8e5174fa 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -414,7 +414,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->id);
+ msm_gem_put_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -496,7 +496,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
@@ -658,8 +658,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_address_space(drm, gpu->aspace);
-
/* Create ringbuffer: */
mutex_lock(&drm->struct_mutex);
@@ -693,7 +691,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->rb) {
if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6364502..4892e18 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -98,7 +98,6 @@ struct msm_gpu {
int irq;
struct msm_gem_address_space *aspace;
- int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index faa22c7..a8f2ba5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -72,6 +72,9 @@ struct msm_kms {
/* irq number to be passed on to drm_irq_install */
int irq;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ struct msm_gem_address_space *aspace;
};
/**
--
1.9.1