[PATCH 14/14] drm/ttm: isolate dma data from ttm_tt
j.glisse at gmail.com
Wed Nov 9 14:18:26 PST 2011
From: Jerome Glisse <jglisse at redhat.com>
Move the DMA data into a superset ttm_dma_tt structure which inherits
from ttm_tt. This allows drivers that don't use the DMA functionality
to avoid wasting memory for it.
Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
drivers/gpu/drm/nouveau/nouveau_bo.c | 18 +++++----
drivers/gpu/drm/nouveau/nouveau_sgdma.c | 22 +++++++----
drivers/gpu/drm/radeon/radeon_ttm.c | 43 +++++++++++-----------
drivers/gpu/drm/ttm/ttm_page_alloc.c | 10 +++---
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 38 +++++++++++---------
drivers/gpu/drm/ttm/ttm_tt.c | 58 ++++++++++++++++++++++++-----
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2 +
include/drm/ttm/ttm_bo_driver.h | 31 +++++++++++++++-
include/drm/ttm/ttm_page_alloc.h | 12 ++----
9 files changed, 155 insertions(+), 79 deletions(-)
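The driver-side conversions in the hunks below rely on plain struct
embedding: the base ttm_tt is the first member of ttm_dma_tt (and of
the driver structs wrapping it), so the ttm_tt pointer that TTM core
hands back can be downcast either with a bare cast, as nouveau and
radeon do, or with container_of(). A minimal sketch of the pattern;
the foo_ helper name is made up for illustration and is not part of
this patch:

	static struct ttm_dma_tt *foo_to_dma_tt(struct ttm_tt *ttm)
	{
		/* (void *)ttm also works here, but only because ttm is
		 * the first member; container_of() is correct for any
		 * member offset. */
		return container_of(ttm, struct ttm_dma_tt, ttm);
	}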
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 36234a7..df3f19c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1052,6 +1052,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct drm_nouveau_private *dev_priv;
struct drm_device *dev;
unsigned i;
@@ -1065,7 +1066,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if ((dma_get_mask(dev->dev) <= DMA_BIT_MASK(32)) && swiotlb_nr_tbl()) {
- return ttm_dma_populate(ttm, dev->dev);
+ return ttm_dma_populate((void *)ttm, dev->dev);
}
#endif
@@ -1075,14 +1076,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+ ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+ if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
while (--i) {
- pci_unmap_page(dev->pdev, ttm->dma_address[i],
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ttm->dma_address[i] = 0;
+ ttm_dma->dma_address[i] = 0;
}
ttm_page_alloc_ttm_tt_unpopulate(ttm);
return -EFAULT;
@@ -1094,6 +1095,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct drm_nouveau_private *dev_priv;
struct drm_device *dev;
unsigned i;
@@ -1103,14 +1105,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if ((dma_get_mask(dev->dev) <= DMA_BIT_MASK(32)) && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(ttm, dev->dev);
+ ttm_dma_unpopulate((void *)ttm, dev->dev);
return;
}
#endif
for (i = 0; i < ttm->num_pages; i++) {
- if (ttm->dma_address[i]) {
- pci_unmap_page(dev->pdev, ttm->dma_address[i],
+ if (ttm_dma->dma_address[i]) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ee1eb7c..47f245e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,7 +8,10 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- struct ttm_tt ttm;
+ /* this has to be the first field so populate/unpopulate in
+ * nouveau_bo.c work properly, otherwise we would have to move them here
+ */
+ struct ttm_dma_tt ttm;
struct drm_device *dev;
u64 offset;
};
@@ -20,6 +23,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
+ ttm_dma_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
@@ -38,7 +42,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < ttm->num_pages; i++) {
- dma_addr_t dma_offset = ttm->dma_address[i];
+ dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -97,7 +101,7 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = ttm->dma_address;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
u32 cnt = ttm->num_pages;
@@ -206,7 +210,7 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = ttm->dma_address;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
u32 cnt = ttm->num_pages;
int i;
@@ -282,10 +286,11 @@ static struct ttm_backend_func nv44_sgdma_backend = {
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
- node->pages = ttm->dma_address;
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
@@ -316,12 +321,13 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
return NULL;
nvbe->dev = dev;
- nvbe->ttm.func = dev_priv->gart_info.func;
+ nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(nvbe);
return NULL;
}
- return &nvbe->ttm;
+ return &nvbe->ttm.ttm;
}
int
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 82119a5..0cb236c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -501,7 +501,7 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
* TTM backend functions.
*/
struct radeon_ttm_tt {
- struct ttm_tt ttm;
+ struct ttm_dma_tt ttm;
struct radeon_device *rdev;
u64 offset;
};
@@ -509,17 +509,16 @@ struct radeon_ttm_tt {
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
- struct radeon_ttm_tt *gtt;
+ struct radeon_ttm_tt *gtt = (void*)ttm;
int r;
- gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, ttm->dma_address);
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@@ -530,18 +529,17 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
- struct radeon_ttm_tt *gtt;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
- gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
- struct radeon_ttm_tt *gtt;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
- gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+ ttm_dma_tt_fini(&gtt->ttm);
if (ttm->state == tt_bound) {
radeon_ttm_backend_unbind(ttm);
}
@@ -573,17 +571,19 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
if (gtt == NULL) {
return NULL;
}
- gtt->ttm.func = &radeon_backend_func;
+ gtt->ttm.ttm.func = &radeon_backend_func;
gtt->rdev = rdev;
- if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(gtt);
return NULL;
}
- return &gtt->ttm;
+ return &gtt->ttm.ttm;
}
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
@@ -594,7 +594,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if (rdev->need_dma32 && swiotlb_nr_tbl()) {
- return ttm_dma_populate(ttm, rdev->dev);
+ return ttm_dma_populate(&gtt->ttm, rdev->dev);
}
#endif
@@ -604,14 +604,14 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm->dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, ttm->dma_address[i])) {
+ gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
while (--i) {
- pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ttm->dma_address[i] = 0;
+ gtt->ttm.dma_address[i] = 0;
}
ttm_page_alloc_ttm_tt_unpopulate(ttm);
return -EFAULT;
@@ -623,20 +623,21 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
rdev = radeon_get_rdev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (rdev->need_dma32 && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(ttm, rdev->dev);
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
return;
}
#endif
for (i = 0; i < ttm->num_pages; i++) {
- if (ttm->dma_address[i]) {
- pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+ if (gtt->ttm.dma_address[i]) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ada2cad..a4cdd99 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -667,7 +667,7 @@ out:
* cached pages.
*/
int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
+ enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
@@ -732,7 +732,7 @@ int ttm_get_pages(struct page **pages, unsigned npages, int flags,
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, count, flags, cstate, NULL);
+ ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
@@ -742,7 +742,7 @@ int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
+ enum ttm_caching_state cstate)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
@@ -865,7 +865,7 @@ int ttm_page_alloc_ttm_tt_populate(struct ttm_tt *ttm)
return -ENOMEM;
ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state, ttm->dma_address);
+ ttm->caching_state);
if (ret != 0) {
ttm_mem_global_free(mem_glob, ttm->num_pages * PAGE_SIZE);
return -ENOMEM;
@@ -892,7 +892,7 @@ void ttm_page_alloc_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages);
ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state, ttm->dma_address);
+ ttm->caching_state);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_page_alloc_ttm_tt_unpopulate);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 0850836..36c7a96 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -785,12 +785,13 @@ out:
return r;
}
-static int ttm_dma_check_alloc_list(struct ttm_tt *ttm)
+static int ttm_dma_check_alloc_list(struct ttm_dma_tt *ttm_dma)
{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_page *d_page;
unsigned i = 0;
- list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
if (d_page->p != ttm->pages[i++]) {
pr_err(TTM_PFX "dma page list & array mismatch %d", i);
return -EINVAL;
@@ -856,8 +857,9 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
* allocates one page at a time.
*/
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_tt *ttm)
+ struct ttm_dma_tt *ttm_dma)
{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_page *d_page, *tmp;
unsigned long irq_flags;
unsigned i, count = ttm->num_pages;
@@ -888,9 +890,9 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
*/
list_for_each_entry_safe(d_page, tmp, &pool->free_list, page_list) {
ttm->pages[i] = d_page->p;
- ttm->dma_address[i] = d_page->dma;
+ ttm_dma->dma_address[i] = d_page->dma;
i++;
- list_move_tail(&d_page->page_list, &ttm->alloc_list);
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
if (i == count)
break;
}
@@ -909,8 +911,9 @@ out:
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
*/
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
struct dma_pool *pool;
gfp_t gfp_flags;
@@ -944,9 +947,9 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
/* ?? we might want to check see if pages are already
* alloced, for the moment just trust the caller
*/
- INIT_LIST_HEAD(&ttm->alloc_list);
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
/* Take pages out of a pool (if applicable) */
- r = ttm_dma_pool_get_pages(pool, ttm);
+ r = ttm_dma_pool_get_pages(pool, ttm_dma);
/* clear the pages coming from the pool if requested */
if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
for (i = 0; i < (ttm->num_pages - r); i++) {
@@ -974,10 +977,10 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
*/
list_for_each_entry(d_page, &d_pages, page_list) {
ttm->pages[i] = d_page->p;
- ttm->dma_address[i] = d_page->dma;
+ ttm_dma->dma_address[i] = d_page->dma;
i++;
}
- list_splice_tail(&d_pages, &ttm->alloc_list);
+ list_splice_tail(&d_pages, &ttm_dma->pages_list);
spin_lock_irqsave(&pool->lock, irq_flags);
pool->npages_in_use += pages_need;
@@ -1010,7 +1013,7 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
r = ttm_tt_swapin(ttm);
if (unlikely(r != 0)) {
- ttm_dma_unpopulate(ttm, dev);
+ ttm_dma_unpopulate(ttm_dma, dev);
return r;
}
}
@@ -1036,8 +1039,9 @@ static int ttm_dma_pool_get_num_unused_pages(void)
}
/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *glob = ttm->glob->mem_glob;
struct dma_pool *pool;
enum pool_type type;
@@ -1063,7 +1067,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
}
/* this should never happen, should we panic here ?*/
- if (ttm_dma_check_alloc_list(ttm)) {
+ if (ttm_dma_check_alloc_list(ttm_dma)) {
pr_err(TTM_PFX "Could not free the other ttm tt pages %ld!\n",
ttm->num_pages);
return;
@@ -1076,7 +1080,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
pool->nfrees += ttm->num_pages;
} else {
pool->npages_free += ttm->num_pages;
- list_splice(&ttm->alloc_list, &pool->free_list);
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
if (pool->npages_free > _manager->options.max_size) {
count = pool->npages_free - _manager->options.max_size;
}
@@ -1084,14 +1088,14 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (is_cached) {
- ttm_dma_pages_put(pool, &ttm->alloc_list,
+ ttm_dma_pages_put(pool, &ttm_dma->pages_list,
ttm->pages, ttm->num_pages);
}
- INIT_LIST_HEAD(&ttm->alloc_list);
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
- ttm->dma_address[i] = 0;
+ ttm_dma->dma_address[i] = 0;
}
/* shrink pool if necessary */
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d1be1c6..e6bbeb2 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -47,17 +47,14 @@
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
- ttm->dma_address = drm_calloc_large(ttm->num_pages,
- sizeof(*ttm->dma_address));
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
- drm_free_large(ttm->dma_address);
- ttm->dma_address = NULL;
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+ ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address));
}
#ifdef CONFIG_X86
@@ -168,7 +165,6 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
if (likely(ttm->pages != NULL)) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
- ttm_tt_free_page_directory(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
@@ -192,7 +188,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
ttm->state = tt_unpopulated;
ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages || !ttm->dma_address) {
+ if (!ttm->pages) {
ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return -ENOMEM;
@@ -201,6 +197,48 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_tt_init);
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+ drm_free_large(ttm_dma->dma_address);
+ ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
+
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 383993b..03f28da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -168,6 +168,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ ttm_tt_fini(ttm);
kfree(vmw_be);
}
@@ -191,6 +192,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(vmw_be);
return NULL;
}
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index beef9ab..89caa48 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -104,7 +104,6 @@ enum ttm_caching_state {
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
* @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
- * @alloc_list: used by some page allocation backend
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -127,8 +126,23 @@ struct ttm_tt {
tt_unbound,
tt_unpopulated,
} state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
dma_addr_t *dma_address;
- struct list_head alloc_list;
+ struct list_head pages_list;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
@@ -595,6 +609,19 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_bind:
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index f127635..e628505 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -36,13 +36,11 @@
* @npages: number of pages to allocate.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state for the page.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
int ttm_get_pages(struct page **pages,
unsigned npages,
int flags,
- enum ttm_caching_state cstate,
- dma_addr_t *dma_address);
+ enum ttm_caching_state cstate);
/**
* Put linked list of pages to pool.
*
@@ -50,13 +48,11 @@ int ttm_get_pages(struct page **pages,
* @npages: number of pages to free.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
void ttm_put_pages(struct page **pages,
unsigned npages,
int flags,
- enum ttm_caching_state cstate,
- dma_addr_t *dma_address);
+ enum ttm_caching_state cstate);
/**
* Initialize pool allocator.
*/
@@ -106,8 +102,8 @@ void ttm_dma_page_alloc_fini(void);
*/
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev);
-extern void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
--
1.7.7.1