[Spice-devel] [PATCH 4/8] Always release free resources when allocating

alexl at redhat.com alexl at redhat.com
Fri Aug 20 11:54:36 PDT 2010


From: Alexander Larsson <alexl at redhat.com>

Leaving around potentially freeable resources when you're about
to allocate just increases the risk for fragmentation, which is bad,
especially for the vram where we do many large allocations in a
limited space.

This also makes all consumers of the release_ring use the malloc
semaphore to make such accesses thread-safe.
---
 display/qxldd.h |    1 -
 display/res.c   |   72 ++++++++++++++++++++++++++-----------------------------
 2 files changed, 34 insertions(+), 39 deletions(-)

diff --git a/display/qxldd.h b/display/qxldd.h
index 7d22a0d..9f09aa8 100644
--- a/display/qxldd.h
+++ b/display/qxldd.h
@@ -178,7 +178,6 @@ typedef struct DevRes {
     MspaceInfo mspaces[NUM_MSPACES];
 
     BOOL need_init;
-    UINT64 free_outputs;
     UINT32 update_id;
 
     DevResDynamic *dynamic;
diff --git a/display/res.c b/display/res.c
index 15e14ae..b30951f 100644
--- a/display/res.c
+++ b/display/res.c
@@ -36,8 +36,6 @@
 #define WAIT_FOR_EVENT(pdev, event, timeout) EngWaitForSingleObject(event, timeout)
 #endif
 
-#define SURFACE_ALLOC_RELEASE_BUNCH_SIZE 3
-
 static _inline QXLPHYSICAL PA(PDev *pdev, PVOID virt, UINT8 slot_id)
 {
     PMemSlot *p_slot = &pdev->mem_slots[slot_id];
@@ -124,6 +122,13 @@ UINT64 ReleaseOutput(PDev *pdev, UINT64 output_id)
     return next;
 }
 
+static void ReleaseOutputs(PDev *pdev, UINT64 output_id)
+{
+    while (output_id != 0) {
+        output_id = ReleaseOutput(pdev, output_id);
+    }
+}
+
 static void AddRes(PDev *pdev, QXLOutput *output, Resource *res)
 {
     DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__));
@@ -285,10 +290,21 @@ static void WaitForReleaseRing(PDev* pdev)
     DEBUG_PRINT((pdev, 16, "%s: 0x%lx, done\n", __FUNCTION__, pdev));
 }
 
+static void EmptyReleaseRing(PDev *pdev)
+{
+    UINT64 outputs;
+    int notify;
+
+    while (!SPICE_RING_IS_EMPTY(pdev->release_ring)) {
+        outputs = *SPICE_RING_CONS_ITEM(pdev->release_ring);
+        SPICE_RING_POP(pdev->release_ring, notify);
+        ReleaseOutputs(pdev, outputs);
+    }
+}
+
 // todo: separate VRAM releases from DEVRAM releases
-#define AllocMem(pdev, mspace_type, size) __AllocMem(pdev, mspace_type, size, TRUE, 1)
-static void *__AllocMem(PDev* pdev, UINT32 mspace_type, size_t size,
-                        BOOL force, INT32 release_bunch)
+#define AllocMem(pdev, mspace_type, size) __AllocMem(pdev, mspace_type, size, TRUE)
+static void *__AllocMem(PDev* pdev, UINT32 mspace_type, size_t size, BOOL force)
 {
     UINT8 *ptr;
 
@@ -296,35 +312,19 @@ static void *__AllocMem(PDev* pdev, UINT32 mspace_type, size_t size,
     DEBUG_PRINT((pdev, 12, "%s: 0x%lx size %u\n", __FUNCTION__, pdev, size));
 
     EngAcquireSemaphore(pdev->malloc_sem);
-    while (!(ptr = mspace_malloc(pdev->Res.mspaces[mspace_type]._mspace, size))) {
-        int notify;
-        int num_to_release = release_bunch;
 
-        while (pdev->Res.free_outputs && num_to_release) {
-            pdev->Res.free_outputs = ReleaseOutput(pdev, pdev->Res.free_outputs);
-            num_to_release--;
-        }
+    do {
+        /* Release all queued resources before allocating, as we
+           want to release early to minimize fragmentation risks. */
+        EmptyReleaseRing(pdev);
 
-        if (!num_to_release) {
-            continue;
-        }
+        ptr = mspace_malloc(pdev->Res.mspaces[mspace_type]._mspace, size);
 
-        if (force) {
+        if (ptr == NULL && force) {
             WaitForReleaseRing(pdev);
-        } else {
-            if (SPICE_RING_IS_EMPTY(pdev->release_ring)) {
-                break;
-            }
         }
+    } while (ptr == NULL && force);
 
-        pdev->Res.free_outputs = *SPICE_RING_CONS_ITEM(pdev->release_ring);
-        SPICE_RING_POP(pdev->release_ring, notify);
-
-        while (pdev->Res.free_outputs && num_to_release) {
-            pdev->Res.free_outputs = ReleaseOutput(pdev, pdev->Res.free_outputs);
-            num_to_release--;
-        }
-    }
     EngReleaseSemaphore(pdev->malloc_sem);
     ASSERT(pdev, (!ptr && !force) || (ptr >= pdev->Res.mspaces[mspace_type].mspace_start &&
                                       ptr < pdev->Res.mspaces[mspace_type].mspace_end));
@@ -411,7 +411,6 @@ static void InitRes(PDev *pdev)
         PANIC(pdev, "Res surfaces_info allocation failed\n");
     }
 
-    pdev->Res.free_outputs = 0;
     InitMspace(&pdev->Res, MSPACE_TYPE_DEVRAM, pdev->io_pages_virt, pdev->num_io_pages * PAGE_SIZE);
     InitMspace(&pdev->Res, MSPACE_TYPE_VRAM, pdev->fb, pdev->fb_size);
     pdev->Res.update_id = *pdev->dev_update_id;
@@ -619,8 +618,7 @@ _inline void GetSurfaceMemory(PDev *pdev, UINT32 x, UINT32 y, UINT32 depth, UINT
     case DEVICE_BITMAP_ALLOCATION_TYPE_VRAM: { 
         *stride = x * depth / 8;
         *stride = ALIGN(*stride, 4);
-        *base_mem = __AllocMem(pdev, MSPACE_TYPE_VRAM, (*stride) * y, FALSE,
-                               SURFACE_ALLOC_RELEASE_BUNCH_SIZE);
+        *base_mem = __AllocMem(pdev, MSPACE_TYPE_VRAM, (*stride) * y, FALSE);
         *phys_mem = PA(pdev, (PVOID)((UINT64)*base_mem), pdev->vram_mem_slot);
         break;
     }
@@ -1328,14 +1326,12 @@ static CacheImage *AllocCacheImage(PDev* pdev)
 {
     RingItem *item;
     while (!(item = RingGetTail(pdev, &pdev->Res.dynamic->cache_image_lru))) {
-        int notify;
-        if (pdev->Res.free_outputs) {
-            pdev->Res.free_outputs = ReleaseOutput(pdev, pdev->Res.free_outputs);
-            continue;
+        EngAcquireSemaphore(pdev->malloc_sem);
+        if (SPICE_RING_IS_EMPTY(pdev->release_ring)) {
+            WaitForReleaseRing(pdev);
         }
-        WaitForReleaseRing(pdev);
-        pdev->Res.free_outputs = *SPICE_RING_CONS_ITEM(pdev->release_ring);
-        SPICE_RING_POP(pdev->release_ring, notify);
+        EmptyReleaseRing(pdev);
+        EngReleaseSemaphore(pdev->malloc_sem);
     }
     RingRemove(pdev, item);
     return CONTAINEROF(item, CacheImage, lru_link);
-- 
1.7.2.1



More information about the Spice-devel mailing list