[PATCH RFC v7 14/23] dept: Add a mechanism to refill the internal memory pools on running out
Byungchul Park
byungchul.park at lge.com
Mon Jan 9 03:33:42 UTC 2023
The Dept engine works in a constrained environment. For example, Dept
cannot make use of dynamic allocation such as kmalloc(), so it has been
using static pools to hold the memory chunks it uses.

However, Dept can barely work once any of the pools runs out. So
implement a mechanism that refills a pool when it is exhausted, using
irq work and a workqueue, which fits the constrained environment.
Signed-off-by: Byungchul Park <byungchul.park at lge.com>
---
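Note for reviewers: below is a minimal standalone sketch (not part of
this patch) of the irq_work -> workqueue chain described above. The
constrained allocation path only queues an irq_work, the irq_work
handler schedules a work item, and only the work item performs the
sleeping vmalloc(). The names refill_work_fn, refill_irq_work and
request_refill are illustrative, not taken from the patch.

#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>

static void refill_work_fn(struct work_struct *work)
{
	/* Process context: a sleeping allocation is fine here. */
	void *p = vmalloc(4096);

	if (p)
		vfree(p);	/* a real refill would hand this to the pool */
}

static DECLARE_WORK(refill_work, refill_work_fn);

static void refill_irq_work_fn(struct irq_work *w)
{
	/* Hard IRQ context: cannot sleep, but can schedule work. */
	schedule_work(&refill_work);
}

static DEFINE_IRQ_WORK(refill_irq_work, refill_irq_work_fn);

/* Callable from the constrained pool-allocation path. */
static void request_refill(void)
{
	irq_work_queue(&refill_irq_work);
}
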
include/linux/dept.h | 19 ++++++--
kernel/dependency/dept.c | 104 ++++++++++++++++++++++++++++++++++------
kernel/dependency/dept_object.h | 10 ++--
kernel/dependency/dept_proc.c | 8 ++--
4 files changed, 112 insertions(+), 29 deletions(-)
diff --git a/include/linux/dept.h b/include/linux/dept.h
index 625c645..21ecefc 100644
--- a/include/linux/dept.h
+++ b/include/linux/dept.h
@@ -336,9 +336,19 @@ struct dept_pool {
size_t obj_sz;
/*
- * the number of the static array
+ * the remaining number of the object in spool
*/
- atomic_t obj_nr;
+ int obj_nr;
+
+ /*
+ * the number of the object in spool
+ */
+ int tot_nr;
+
+ /*
+ * accumulated amount of memory used by the object in byte
+ */
+ atomic_t acc_sz;
/*
* offset of ->pool_node
@@ -348,9 +358,10 @@ struct dept_pool {
/*
* pointer to the pool
*/
- void *spool;
+ void *spool; /* static pool */
+ void *rpool; /* reserved pool */
struct llist_head boot_pool;
- struct llist_head __percpu *lpool;
+ struct llist_head __percpu *lpool; /* local pool */
};
struct dept_ecxt_held {
diff --git a/kernel/dependency/dept.c b/kernel/dependency/dept.c
index 2f215c2..11d4f75 100644
--- a/kernel/dependency/dept.c
+++ b/kernel/dependency/dept.c
@@ -73,6 +73,9 @@
#include <linux/hash.h>
#include <linux/dept.h>
#include <linux/utsname.h>
+#include <linux/workqueue.h>
+#include <linux/irq_work.h>
+#include <linux/vmalloc.h>
#include "dept_internal.h"
static int dept_stop;
@@ -121,10 +124,12 @@
WARN(1, "DEPT_STOP: " s); \
})
-#define DEPT_INFO_ONCE(s...) pr_warn_once("DEPT_INFO_ONCE: " s)
+#define DEPT_INFO_ONCE(s...) pr_warn_once("DEPT_INFO_ONCE: " s)
+#define DEPT_INFO(s...) pr_warn("DEPT_INFO: " s)
static arch_spinlock_t dept_spin = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t stage_spin = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t dept_pool_spin = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/*
* DEPT internal engine should be careful in using outside functions
@@ -263,6 +268,7 @@ static inline bool valid_key(struct dept_key *k)
#define OBJECT(id, nr) \
static struct dept_##id spool_##id[nr]; \
+static struct dept_##id rpool_##id[nr]; \
static DEFINE_PER_CPU(struct llist_head, lpool_##id);
#include "dept_object.h"
#undef OBJECT
@@ -271,14 +277,70 @@ struct dept_pool dept_pool[OBJECT_NR] = {
#define OBJECT(id, nr) { \
.name = #id, \
.obj_sz = sizeof(struct dept_##id), \
- .obj_nr = ATOMIC_INIT(nr), \
+ .obj_nr = nr, \
+ .tot_nr = nr, \
+ .acc_sz = ATOMIC_INIT(sizeof(spool_##id) + sizeof(rpool_##id)), \
.node_off = offsetof(struct dept_##id, pool_node), \
.spool = spool_##id, \
+ .rpool = rpool_##id, \
.lpool = &lpool_##id, },
#include "dept_object.h"
#undef OBJECT
};
+static void dept_wq_work_fn(struct work_struct *work)
+{
+ int i;
+
+ for (i = 0; i < OBJECT_NR; i++) {
+ struct dept_pool *p = dept_pool + i;
+ int sz = p->tot_nr * p->obj_sz;
+ void *rpool;
+ bool need;
+
+ arch_spin_lock(&dept_pool_spin);
+ need = !p->rpool;
+ arch_spin_unlock(&dept_pool_spin);
+
+ if (!need)
+ continue;
+
+ rpool = vmalloc(sz);
+
+ if (!rpool) {
+ DEPT_STOP("Failed to extend internal resources.\n");
+ break;
+ }
+
+ arch_spin_lock(&dept_pool_spin);
+ if (!p->rpool) {
+ p->rpool = rpool;
+ rpool = NULL;
+ atomic_add(sz, &p->acc_sz);
+ }
+ arch_spin_unlock(&dept_pool_spin);
+
+ if (rpool)
+ vfree(rpool);
+ else
+ DEPT_INFO("Dept object(%s) just got refilled successfully.\n", p->name);
+ }
+}
+
+static DECLARE_WORK(dept_wq_work, dept_wq_work_fn);
+
+static void dept_irq_work_fn(struct irq_work *w)
+{
+ schedule_work(&dept_wq_work);
+}
+
+static DEFINE_IRQ_WORK(dept_irq_work, dept_irq_work_fn);
+
+static void request_rpool_refill(void)
+{
+ irq_work_queue(&dept_irq_work);
+}
+
/*
* Can use llist no matter whether CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG is
* enabled or not because NMI and other contexts in the same CPU never
@@ -314,19 +376,31 @@ static void *from_pool(enum object_t t)
/*
* Try static pool.
*/
- if (atomic_read(&p->obj_nr) > 0) {
- int idx = atomic_dec_return(&p->obj_nr);
+ arch_spin_lock(&dept_pool_spin);
+
+ if (!p->obj_nr) {
+ p->spool = p->rpool;
+ p->obj_nr = p->rpool ? p->tot_nr : 0;
+ p->rpool = NULL;
+ request_rpool_refill();
+ }
+
+ if (p->obj_nr) {
+ void *ret;
+
+ p->obj_nr--;
+ ret = p->spool + (p->obj_nr * p->obj_sz);
+ arch_spin_unlock(&dept_pool_spin);
- if (idx >= 0)
- return p->spool + (idx * p->obj_sz);
+ return ret;
}
+ arch_spin_unlock(&dept_pool_spin);
- DEPT_INFO_ONCE("---------------------------------------------\n"
- " Some of Dept internal resources are run out.\n"
- " Dept might still work if the resources get freed.\n"
- " However, the chances are Dept will suffer from\n"
- " the lack from now. Needs to extend the internal\n"
- " resource pools. Ask max.byungchul.park at gmail.com\n");
+ DEPT_INFO("------------------------------------------\n"
+ " Dept object(%s) is run out.\n"
+ " Dept is trying to refill the object.\n"
+ " Nevertheless, if it fails, Dept will stop.\n",
+ p->name);
return NULL;
}
@@ -2971,8 +3045,8 @@ void __init dept_init(void)
pr_info("... DEPT_MAX_ECXT_HELD : %d\n", DEPT_MAX_ECXT_HELD);
pr_info("... DEPT_MAX_SUBCLASSES : %d\n", DEPT_MAX_SUBCLASSES);
#define OBJECT(id, nr) \
- pr_info("... memory used by %s: %zu KB\n", \
- #id, B2KB(sizeof(struct dept_##id) * nr));
+ pr_info("... memory initially used by %s: %zu KB\n", \
+ #id, B2KB(sizeof(spool_##id) + sizeof(rpool_##id)));
#include "dept_object.h"
#undef OBJECT
#define HASH(id, bits) \
@@ -2980,6 +3054,6 @@ void __init dept_init(void)
#id, B2KB(sizeof(struct hlist_head) * (1UL << bits)));
#include "dept_hash.h"
#undef HASH
- pr_info("... total memory used by objects and hashs: %zu KB\n", B2KB(mem_total));
+ pr_info("... total memory initially used by objects and hashs: %zu KB\n", B2KB(mem_total));
pr_info("... per task memory footprint: %zu bytes\n", sizeof(struct dept_task));
}
diff --git a/kernel/dependency/dept_object.h b/kernel/dependency/dept_object.h
index 0b7eb16..4f936ad 100644
--- a/kernel/dependency/dept_object.h
+++ b/kernel/dependency/dept_object.h
@@ -6,8 +6,8 @@
* nr: # of the object that should be kept in the pool.
*/
-OBJECT(dep, 1024 * 8)
-OBJECT(class, 1024 * 8)
-OBJECT(stack, 1024 * 32)
-OBJECT(ecxt, 1024 * 16)
-OBJECT(wait, 1024 * 32)
+OBJECT(dep, 1024 * 4 * 2)
+OBJECT(class, 1024 * 4)
+OBJECT(stack, 1024 * 4 * 8)
+OBJECT(ecxt, 1024 * 4 * 2)
+OBJECT(wait, 1024 * 4 * 4)
diff --git a/kernel/dependency/dept_proc.c b/kernel/dependency/dept_proc.c
index 7d61dfb..f07a512 100644
--- a/kernel/dependency/dept_proc.c
+++ b/kernel/dependency/dept_proc.c
@@ -73,12 +73,10 @@ static int dept_stats_show(struct seq_file *m, void *v)
{
int r;
- seq_puts(m, "Availability in the static pools:\n\n");
+ seq_puts(m, "Accumulated amount of memory used by pools:\n\n");
#define OBJECT(id, nr) \
- r = atomic_read(&dept_pool[OBJECT_##id].obj_nr); \
- if (r < 0) \
- r = 0; \
- seq_printf(m, "%s\t%d/%d(%d%%)\n", #id, r, nr, (r * 100) / (nr));
+ r = atomic_read(&dept_pool[OBJECT_##id].acc_sz); \
+ seq_printf(m, "%s\t%d KB\n", #id, r / 1024);
#include "dept_object.h"
#undef OBJECT
--
1.9.1