[PATCH 05/13] drm/ttm: overhaul memory accounting
j.glisse at gmail.com
Wed Nov 9 12:22:22 PST 2011
From: Jerome Glisse <jglisse at redhat.com>
This is an overhaul of the ttm memory accounting. It tries to keep
the same global behavior while removing the whole zone concept. It
keeps a distinction for dma32 so that ttm does not starve the dma32
zone.
There are four thresholds for memory allocation (their default
values are sketched just after this list):
- max_mem is the maximum amount of memory the whole ttm
  infrastructure will allow to be allocated (with an exception
  for privileged processes, see below)
- emer_mem is the maximum amount of memory allowed for privileged
  (CAP_SYS_ADMIN) processes; this limit is greater than max_mem
- swap_limit is the threshold at which ttm starts trying to swap
  objects out, because total usage is getting close to the max_mem
  limit
- swap_dma32_limit is the threshold at which ttm starts swapping
  objects out to reduce pressure on the dma32 zone. Note that
  swapping does not specifically target dma32 objects, so it may
  very well free more memory from highmem than from dma32
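For illustration, this is how the defaults computed by
ttm_mem_global_init() further down in this patch work out; a sketch
only, with the comments assuming a machine with 8 GiB of RAM:

    /* Default limits, mirroring ttm_mem_global_init(); values for 8 GiB RAM. */
    unsigned long mem = si.totalram * si.mem_unit;       /* total RAM: 8 GiB  */
    unsigned long max_mem = mem >> 1;                    /* 1/2 of RAM: 4 GiB */
    unsigned long emer_mem = (mem >> 1) + (mem >> 2);    /* 3/4 of RAM: 6 GiB */
    unsigned long swap_limit = max_mem - (mem >> 3);     /* 3/8 of RAM: 3 GiB */
    unsigned long swap_dma32_limit =
        ((1ULL << 32) >> 1) - ((1ULL << 32) >> 3);       /* fixed: 1.5 GiB    */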
Accounting is done through used_mem & used_dma32_mem, whose sum
gives the total amount of memory actually accounted for by ttm.
The idea is that an allocation fails if (used_mem + used_dma32_mem) >
max_mem and swapping fails to make enough room.
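Condensed from ttm_mem_global_alloc() in this patch, the allocation
path boils down to the following loop (a sketch; error paths and the
callers' context are trimmed):

    unsigned long limit;
    int i;

    for (i = 0; i < TTM_MEMORY_RETRIES; i++) {
        spin_lock(&glob->lock);
        /* privileged (CAP_SYS_ADMIN) callers may go up to emer_mem */
        limit = capable(CAP_SYS_ADMIN) ? glob->emer_mem : glob->max_mem;
        if ((glob->used_mem + glob->used_dma32_mem + memory) < limit) {
            glob->used_mem += memory;
            spin_unlock(&glob->lock);
            return 0;
        }
        spin_unlock(&glob->lock);
        if (no_wait)
            return -ENOMEM;
        /* swap out a bit more than requested to make room */
        ttm_shrink(glob, false, memory + (memory >> 2) + 16);
    }
    return -ENOMEM;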
used_dma32_mem can be updated at a later stage, which allows the
accounting test to be performed before allocating a whole batch of
pages.
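Concretely, ttm_mem_global_alloc_page() first charges a page against
used_mem and only re-classifies it once the page (and thus its pfn)
is known; a condensed sketch of that two-stage accounting:

    int ret;

    ret = ttm_mem_global_alloc(glob, PAGE_SIZE, no_wait);
    if (ret)
        return -ENOMEM;
    if (page_to_pfn(page) < 0x00100000UL) {
        /* page lies below the 4 GiB boundary: move the charge
         * from used_mem to the dma32 counter */
        spin_lock(&glob->lock);
        glob->used_mem -= PAGE_SIZE;
        glob->used_dma32_mem += PAGE_SIZE;
        spin_unlock(&glob->lock);
    }
    ttm_check_swapping(glob);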
Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
drivers/gpu/drm/ttm/ttm_bo.c | 2 +-
drivers/gpu/drm/ttm/ttm_memory.c | 517 +++++++++---------------------
drivers/gpu/drm/ttm/ttm_object.c | 3 +-
drivers/gpu/drm/ttm/ttm_tt.c | 2 +-
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 8 +-
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8 +-
include/drm/ttm/ttm_memory.h | 23 +-
7 files changed, 168 insertions(+), 395 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4bde335..92712798 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1252,7 +1252,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd8..b550baf 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -35,21 +35,10 @@
#include <linux/module.h>
#include <linux/slab.h>
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_zone {
- struct kobject kobj;
- struct ttm_mem_global *glob;
- const char *name;
- uint64_t zone_mem;
- uint64_t emer_mem;
- uint64_t max_mem;
- uint64_t swap_limit;
- uint64_t used_mem;
-};
+#define TTM_MEMORY_RETRIES 4
static struct attribute ttm_mem_sys = {
- .name = "zone_memory",
+ .name = "memory",
.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
@@ -64,140 +53,141 @@ static struct attribute ttm_mem_swap = {
.name = "swap_limit",
.mode = S_IRUGO | S_IWUSR
};
+static struct attribute ttm_mem_dma32_swap = {
+ .name = "swap_dma32_limit",
+ .mode = S_IRUGO | S_IWUSR
+};
static struct attribute ttm_mem_used = {
.name = "used_memory",
.mode = S_IRUGO
};
+static struct attribute ttm_mem_dma32_used = {
+ .name = "used_dma32_memory",
+ .mode = S_IRUGO
+};
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
-
- printk(KERN_INFO TTM_PFX
- "Zone %7s: Used memory at exit: %llu kiB.\n",
- zone->name, (unsigned long long) zone->used_mem >> 10);
- kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
+static ssize_t ttm_mem_global_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buffer)
{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- uint64_t val = 0;
+ struct ttm_mem_global *glob =
+ container_of(kobj, struct ttm_mem_global, kobj);
+ unsigned long val = 0;
- spin_lock(&zone->glob->lock);
+ spin_lock(&glob->lock);
if (attr == &ttm_mem_sys)
- val = zone->zone_mem;
+ val = glob->mem;
else if (attr == &ttm_mem_emer)
- val = zone->emer_mem;
+ val = glob->emer_mem;
else if (attr == &ttm_mem_max)
- val = zone->max_mem;
+ val = glob->max_mem;
else if (attr == &ttm_mem_swap)
- val = zone->swap_limit;
+ val = glob->swap_limit;
else if (attr == &ttm_mem_used)
- val = zone->used_mem;
- spin_unlock(&zone->glob->lock);
+ val = glob->used_mem;
+ else if (attr == &ttm_mem_dma32_used)
+ val = glob->used_dma32_mem;
+ else if (attr == &ttm_mem_dma32_swap)
+ val = glob->swap_dma32_limit;
+ spin_unlock(&glob->lock);
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val >> 10);
+ return snprintf(buffer, PAGE_SIZE, "%lu\n", val >> 10);
}
static void ttm_check_swapping(struct ttm_mem_global *glob);
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
+static ssize_t ttm_mem_global_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t size)
{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- int chars;
+ struct ttm_mem_global *glob =
+ container_of(kobj, struct ttm_mem_global, kobj);
unsigned long val;
- uint64_t val64;
+ int chars;
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
- val64 = val;
- val64 <<= 10;
+ val <<= 10;
+
+ spin_lock(&glob->lock);
+ /* limit to maximum memory */
+ if (val > glob->mem)
+ val = glob->mem;
- spin_lock(&zone->glob->lock);
- if (val64 > zone->zone_mem)
- val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
- zone->emer_mem = val64;
- if (zone->max_mem > val64)
- zone->max_mem = val64;
+ glob->emer_mem = val;
+ if (glob->max_mem > val)
+ glob->max_mem = val;
} else if (attr == &ttm_mem_max) {
- zone->max_mem = val64;
- if (zone->emer_mem < val64)
- zone->emer_mem = val64;
- } else if (attr == &ttm_mem_swap)
- zone->swap_limit = val64;
- spin_unlock(&zone->glob->lock);
+ glob->max_mem = val;
+ if (glob->emer_mem < val)
+ glob->emer_mem = val;
+ } else if (attr == &ttm_mem_swap) {
+ glob->swap_limit = val;
+ } else if (attr == &ttm_mem_dma32_swap) {
+ glob->swap_dma32_limit = val;
+ }
+ spin_unlock(&glob->lock);
+ ttm_check_swapping(glob);
+ return size;
+}
- ttm_check_swapping(zone->glob);
+static void ttm_mem_global_kobj_release(struct kobject *kobj)
+{
+ struct ttm_mem_global *glob =
+ container_of(kobj, struct ttm_mem_global, kobj);
- return size;
+ kfree(glob);
}
-static struct attribute *ttm_mem_zone_attrs[] = {
+static struct attribute *ttm_mem_global_attrs[] = {
&ttm_mem_sys,
&ttm_mem_emer,
&ttm_mem_max,
&ttm_mem_swap,
+ &ttm_mem_dma32_swap,
&ttm_mem_used,
+ &ttm_mem_dma32_used,
NULL
};
-static const struct sysfs_ops ttm_mem_zone_ops = {
- .show = &ttm_mem_zone_show,
- .store = &ttm_mem_zone_store
+static const struct sysfs_ops ttm_mem_global_ops = {
+ .show = &ttm_mem_global_show,
+ .store = &ttm_mem_global_store
};
-static struct kobj_type ttm_mem_zone_kobj_type = {
- .release = &ttm_mem_zone_kobj_release,
- .sysfs_ops = &ttm_mem_zone_ops,
- .default_attrs = ttm_mem_zone_attrs,
-};
-
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- kfree(glob);
-}
-
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
+ .sysfs_ops = &ttm_mem_global_ops,
+ .default_attrs = ttm_mem_global_attrs,
};
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
- bool from_wq, uint64_t extra)
+static bool ttm_above_swap_target(struct ttm_mem_global *glob,
+ bool from_wq, uint64_t extra)
{
- unsigned int i;
- struct ttm_mem_zone *zone;
- uint64_t target;
+ unsigned long target;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
-
- if (from_wq)
- target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
- target = zone->emer_mem;
+ if (from_wq) {
+ if (glob->used_mem > glob->swap_limit) {
+ return true;
+ }
+ if (glob->used_dma32_mem > glob->swap_dma32_limit) {
+ return true;
+ }
+ } else {
+ if (capable(CAP_SYS_ADMIN))
+ target = glob->emer_mem;
else
- target = zone->max_mem;
-
- target = (extra > target) ? 0ULL : target;
-
- if (zone->used_mem > target)
+ target = glob->max_mem;
+ if (extra > target) {
return true;
+ }
+ if ((glob->used_mem + glob->used_dma32_mem) > target) {
+ return true;
+ }
}
return false;
}
@@ -208,31 +198,31 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
* Note that this function is reentrant:
* many threads may try to swap out at any given time.
*/
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
+static void ttm_shrink(struct ttm_mem_global *glob,
+ bool from_wq,
uint64_t extra)
{
- int ret;
struct ttm_mem_shrink *shrink;
+ int ret, nretries = TTM_MEMORY_RETRIES;
spin_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
- while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
+ while (ttm_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
spin_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
+ if (--nretries < 0)
+ goto out;
}
out:
spin_unlock(&glob->lock);
}
-
-
static void ttm_shrink_work(struct work_struct *work)
{
struct ttm_mem_global *glob =
@@ -241,127 +231,10 @@ static void ttm_shrink_work(struct work_struct *work)
ttm_shrink(glob, true, 0ULL);
}
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram - si->totalhigh;
- mem *= si->mem_unit;
-
- zone->name = "kernel";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone;
- uint64_t mem;
- int ret;
-
- if (si->totalhigh == 0)
- return 0;
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- zone->name = "highmem";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_highmem = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- /**
- * No special dma32 zone needed.
- */
-
- if (mem <= ((uint64_t) 1ULL << 32)) {
- kfree(zone);
- return 0;
- }
-
- /*
- * Limit max dma32 memory to 4GB for now
- * until we can figure out how big this
- * zone really is.
- */
-
- mem = ((uint64_t) 1ULL << 32);
- zone->name = "dma32";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#endif
-
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
struct sysinfo si;
int ret;
- int i;
- struct ttm_mem_zone *zone;
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
@@ -374,50 +247,29 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
return ret;
}
+ /* compute the default limits */
si_meminfo(&si);
-
- ret = ttm_mem_init_kernel_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
- ret = ttm_mem_init_highmem_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#else
- ret = ttm_mem_init_dma32_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#endif
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- printk(KERN_INFO TTM_PFX
- "Zone %7s: Available graphics memory: %llu kiB.\n",
- zone->name, (unsigned long long) zone->max_mem >> 10);
- }
- ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ glob->mem = si.totalram;
+ glob->mem *= si.mem_unit;
+ glob->used_mem = 0;
+ glob->used_dma32_mem = 0;
+ glob->max_mem = glob->mem >> 1;
+ glob->emer_mem = (glob->mem >> 1) + (glob->mem >> 2);
+ glob->swap_limit = glob->max_mem - (glob->mem >> 3);
+ glob->swap_dma32_limit = ((1ULL << 32) >> 1) - (((1ULL << 32) >> 3));
+ ttm_page_alloc_init(glob, glob->max_mem/(2*PAGE_SIZE));
return 0;
-out_no_zone:
- ttm_mem_global_release(glob);
- return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- kobject_del(&zone->kobj);
- kobject_put(&zone->kobj);
- }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
@@ -426,18 +278,14 @@ EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
bool needs_swapping = false;
- unsigned int i;
- struct ttm_mem_zone *zone;
spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (zone->used_mem > zone->swap_limit) {
- needs_swapping = true;
- break;
- }
+ if ((glob->used_mem + glob->used_dma32_mem) > glob->swap_limit) {
+ needs_swapping = true;
+ }
+ if (glob->used_dma32_mem > glob->swap_dma32_limit) {
+ needs_swapping = true;
}
-
spin_unlock(&glob->lock);
if (unlikely(needs_swapping))
@@ -445,142 +293,69 @@ static void ttm_check_swapping(struct ttm_mem_global *glob)
}
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem -= amount;
- }
- spin_unlock(&glob->lock);
-}
-
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
- return ttm_mem_global_free_zone(glob, NULL, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
-{
- uint64_t limit;
- int ret = -ENOMEM;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
-
- limit = (capable(CAP_SYS_ADMIN)) ?
- zone->emer_mem : zone->max_mem;
-
- if (zone->used_mem > limit)
- goto out_unlock;
- }
-
- if (reserve) {
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem += amount;
- }
- }
-
- ret = 0;
-out_unlock:
+ glob->used_mem -= amount;
spin_unlock(&glob->lock);
- ttm_check_swapping(glob);
-
- return ret;
}
+EXPORT_SYMBOL(ttm_mem_global_free);
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t memory,
- bool no_wait, bool interruptible)
+int ttm_mem_global_alloc(struct ttm_mem_global *glob,
+ uint64_t memory,
+ bool no_wait)
{
- int count = TTM_MEMORY_ALLOC_RETRIES;
+ unsigned long limit;
+ int i;
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
- != 0)) {
+ for (i = 0; i < TTM_MEMORY_RETRIES; i++) {
+ spin_lock(&glob->lock);
+ limit = (capable(CAP_SYS_ADMIN)) ? glob->emer_mem : glob->max_mem;
+ if ((glob->used_mem + glob->used_dma32_mem + memory) < limit) {
+ glob->used_mem += memory;
+ spin_unlock(&glob->lock);
+ return 0;
+ }
+ spin_unlock(&glob->lock);
if (no_wait)
return -ENOMEM;
- if (unlikely(count-- == 0))
- return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
}
-
- return 0;
-}
-
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible)
-{
- /**
- * Normal allocations of kernel memory are registered in
- * all zones.
- */
-
- return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
- interruptible);
+ return -ENOMEM;
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
- bool no_wait, bool interruptible)
+ bool no_wait)
{
- struct ttm_mem_zone *zone = NULL;
-
- /**
- * Page allocations may be registed in a single zone
- * only if highmem or !dma32.
- */
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
- interruptible);
+ if (ttm_mem_global_alloc(glob, PAGE_SIZE, no_wait))
+ return -ENOMEM;
+
+ /* pages below the 4GB boundary are accounted as dma32 */
+ if (page_to_pfn(page) < 0x00100000UL) {
+ spin_lock(&glob->lock);
+ glob->used_mem -= PAGE_SIZE;
+ glob->used_dma32_mem += PAGE_SIZE;
+ spin_unlock(&glob->lock);
+ }
+ ttm_check_swapping(glob);
+ return 0;
}
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
- struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+ spin_lock(&glob->lock);
+ if (page_to_pfn(page) < 0x00100000UL) {
+ glob->used_dma32_mem -= PAGE_SIZE;
+ } else {
+ glob->used_mem -= PAGE_SIZE;
+ }
+ spin_unlock(&glob->lock);
}
-
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 93577f2..662c6fc 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -267,8 +267,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
read_unlock(&tfile->lock);
- ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- false, false);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false);
if (unlikely(ret != 0))
return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3fb4c6d..e5432efa 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -81,7 +81,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
p = list_first_entry(&h, struct page, lru);
- ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+ ret = ttm_mem_global_alloc_page(mem_glob, p, false);
if (unlikely(ret != 0))
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 15fb260..8fd1e6b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -503,8 +503,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence;
int ret;
- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
- false, false);
+ ret = ttm_mem_global_alloc(mem_glob, fman->fence_size, false);
if (unlikely(ret != 0))
return ret;
@@ -573,8 +572,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
* be created by a user-space request.
*/
- ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- false, false);
+ ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, false);
if (unlikely(ret != 0))
return ret;
@@ -966,7 +964,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
* event size itself.
*/
- ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+ ret = ttm_mem_global_alloc(mem_glob, size, false);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4c..3c43ac0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -430,7 +430,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
- false, true);
+ false);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
@@ -1298,7 +1298,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
+ size, false);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
@@ -1560,7 +1560,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
vmw_dmabuf_acc_size(bdev->glob,
(size + PAGE_SIZE - 1) >> PAGE_SHIFT);
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false);
if (unlikely(ret != 0)) {
/* we must free the bo here as
* ttm_buffer_object_init does so as well */
@@ -1851,7 +1851,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
- false, true);
+ false);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 26c1f78..d24792b 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -81,14 +81,13 @@ struct ttm_mem_global {
struct work_struct work;
wait_queue_head_t queue;
spinlock_t lock;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
+ unsigned long mem;
+ unsigned long max_mem;
+ unsigned long emer_mem;
+ unsigned long used_mem;
+ unsigned long used_dma32_mem;
+ unsigned long swap_limit;
+ unsigned long swap_dma32_limit;
};
/**
@@ -146,14 +145,16 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
-extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob,
+ uint64_t memory,
+ bool no_wait);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
- bool no_wait, bool interruptible);
+ bool no_wait);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page);
extern size_t ttm_round_pot(size_t size);
+
#endif
--
1.7.7.1