[PATCH 82/87] softirq: Prefer low-latency over ksoftirqd for HI_SOFTIRQ

Chris Wilson <chris@chris-wilson.co.uk>
Sat Mar 24 11:41:58 UTC 2018
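
Since commit 4cd13c21b207 ("softirq: Let ksoftirqd do its job"), all
pending softirqs are deferred to ksoftirqd whenever that thread is
runnable. That protects the system from softirq storms, but it also
adds wakeup-and-reschedule latency to every softirq, including
HI_SOFTIRQ, whose users (hi-tasklets) exist precisely because they
want the lowest latency available.

Instead of keying the decision off ksoftirqd's task state, track in a
per-cpu mask exactly which softirqs have been handed over to
ksoftirqd. Only those softirqs continue to be deferred; HI_SOFTIRQ is
exempted from the check, so a pending hi-tasklet on its own is still
serviced immediately. As a bonus, wakeup_softirqd() can now skip the
wakeup when the pending bits are already queued behind the thread, and
run_ksoftirqd() clears the mask before draining the backlog.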


References: 4cd13c21b207 ("softirq: Let ksoftirqd do its job")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
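For context, the immediate beneficiaries are tasklet_hi users; with
the v4.16-era tasklet API such a consumer looks roughly like the
hypothetical driver below (illustrative only, not part of this
series):

#include <linux/interrupt.h>

/* Bottom half: runs in HI_SOFTIRQ context, ahead of all other softirqs. */
static void example_bottom_half(unsigned long data)
{
	/* latency-sensitive completion work goes here */
}
static DECLARE_TASKLET(example_bh, example_bottom_half, 0);

/* Top half: punt the heavy lifting onto the lowest-latency path. */
static irqreturn_t example_top_half(int irq, void *dev)
{
	tasklet_hi_schedule(&example_bh);	/* raises HI_SOFTIRQ */
	return IRQ_HANDLED;
}

A runnable ksoftirqd used to force even that path through the thread;
with HI_SOFTIRQ exempted from the deferral check, a lone hi-tasklet is
now always handled inline.
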
 arch/x86/include/asm/hardirq.h |  7 +++++++
 include/asm-generic/hardirq.h  |  1 +
 include/linux/interrupt.h      |  2 ++
 include/linux/irq_cpustat.h    |  3 +++
 kernel/softirq.c               | 39 +++++++++++++++++++++------------------
 5 files changed, 34 insertions(+), 18 deletions(-)
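
The deferral policy itself reduces to a few bit operations; below is a
minimal user-space model of it (plain C; the bit numbers mirror the
head of the softirq enum, "overflow" stands in for the new per-cpu
__ksoftirqd_overflow field, and the boolean wakeup return value is a
simplification for the sake of the asserts):

#include <assert.h>
#include <stdio.h>

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ };

static unsigned int overflow;	/* models irq_stat.__ksoftirqd_overflow */

/* Defer only what has already been punted to ksoftirqd; never HI. */
static int defer_to_ksoftirqd(unsigned int pending)
{
	pending &= ~(1u << HI_SOFTIRQ);
	return overflow & pending;
}

/* Record what ksoftirqd now owns; report whether a wakeup is needed. */
static int wakeup_softirqd(unsigned int pending)
{
	if (overflow & pending)
		return 0;	/* already queued, skip the redundant wakeup */
	overflow |= pending;
	return 1;		/* i.e. wake_up_process(ksoftirqd) */
}

int main(void)
{
	/* Nothing punted yet: everything runs inline. */
	assert(!defer_to_ksoftirqd(1u << NET_RX_SOFTIRQ));

	/* Under load, NET_RX is handed over once ... */
	assert(wakeup_softirqd(1u << NET_RX_SOFTIRQ));
	/* ... after which it defers without rewaking the thread ... */
	assert(defer_to_ksoftirqd(1u << NET_RX_SOFTIRQ));
	assert(!wakeup_softirqd(1u << NET_RX_SOFTIRQ));
	/* ... while a lone hi-tasklet is still handled immediately. */
	assert(!defer_to_ksoftirqd(1u << HI_SOFTIRQ));

	/* run_ksoftirqd() resets the mask before draining the backlog. */
	overflow = 0;
	assert(!defer_to_ksoftirqd(1u << NET_RX_SOFTIRQ));

	printf("model ok\n");
	return 0;
}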

diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 7c341a74ec8c..90d9ace63735 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -7,6 +7,7 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
+	unsigned int __ksoftirqd_overflow;
 	unsigned int __nmi_count;	/* arch dependent */
 #ifdef CONFIG_X86_LOCAL_APIC
 	unsigned int apic_timer_irqs;	/* arch dependent */
@@ -50,6 +51,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
 
 #define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+#define local_ksoftirqd_overflow()	this_cpu_read(irq_stat.__ksoftirqd_overflow)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
 
@@ -57,6 +59,11 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 		this_cpu_write(irq_stat.__softirq_pending, (x))
 #define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
 
+#define set_ksoftirqd_overflow(x)	\
+		this_cpu_write(irq_stat.__ksoftirqd_overflow, (x))
+#define or_ksoftirqd_overflow(x)	\
+		this_cpu_or(irq_stat.__ksoftirqd_overflow, (x))
+
 extern void ack_bad_irq(unsigned int irq);
 
 extern u64 arch_irq_stat_cpu(unsigned int cpu);
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index d14214dfc10b..e506fb8193df 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -7,6 +7,7 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
+	unsigned int __ksoftirqd_overflow;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..88d57b28f438 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -437,6 +437,8 @@ extern bool force_irqthreads;
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
+#define set_ksoftirqd_overflow(x) (local_ksoftirqd_overflow() = (x))
+#define or_ksoftirqd_overflow(x)  (local_ksoftirqd_overflow() |= (x))
 #endif
 
 /* Some architectures might implement lazy enabling/disabling of
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
index 4954948d1973..ed7663591203 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -26,6 +26,9 @@ extern irq_cpustat_t irq_stat[];		/* defined in asm/hardirq.h */
 #define local_softirq_pending() \
 	__IRQ_STAT(smp_processor_id(), __softirq_pending)
 
+#define local_ksoftirqd_overflow() \
+	__IRQ_STAT(smp_processor_id(), __ksoftirqd_overflow)
+
   /* arch dependent irq_stat fields */
 #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386 */
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 24d243ef8e71..4fb5a126927a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -68,12 +68,16 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+static void wakeup_softirqd(unsigned long pending)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
-	if (tsk && tsk->state != TASK_RUNNING)
+	if (!tsk || (local_ksoftirqd_overflow() & pending))
+		return;
+
+	or_ksoftirqd_overflow(pending); /* ksoftirqd now owns these */
+	if (tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
 }
 
@@ -81,11 +85,10 @@ static void wakeup_softirqd(void)
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
  * right now. Let ksoftirqd handle this at its own rate, to get fairness.
  */
-static bool ksoftirqd_running(void)
+static bool defer_to_ksoftirqd(__u32 pending)
 {
-	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-
-	return tsk && (tsk->state == TASK_RUNNING);
+	pending &= ~(1 << HI_SOFTIRQ); /* hi-tasklets always run inline */
+	return local_ksoftirqd_overflow() & pending;
 }
 
 /*
@@ -303,7 +306,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 		    --max_restart)
 			goto restart;
 
-		wakeup_softirqd();
+		wakeup_softirqd(pending);
 	}
 
 	lockdep_softirq_end(in_hardirq);
@@ -324,8 +327,7 @@ asmlinkage __visible void do_softirq(void)
 	local_irq_save(flags);
 
 	pending = local_softirq_pending();
-
-	if (pending && !ksoftirqd_running())
+	if (pending && !defer_to_ksoftirqd(pending))
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -350,12 +352,12 @@ void irq_enter(void)
 	__irq_enter();
 }
 
-static inline void invoke_softirq(void)
+static inline void invoke_softirq(__u32 pending)
 {
-	if (ksoftirqd_running())
+	if (!pending)
 		return;
 
-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_to_ksoftirqd(pending)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
@@ -372,7 +374,7 @@ static inline void invoke_softirq(void)
 		do_softirq_own_stack();
 #endif
 	} else {
-		wakeup_softirqd();
+		wakeup_softirqd(pending);
 	}
 }
 
@@ -401,8 +403,8 @@ void irq_exit(void)
 #endif
 	account_irq_exit_time(current);
 	preempt_count_sub(HARDIRQ_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
-		invoke_softirq();
+	if (!in_interrupt())
+		invoke_softirq(local_softirq_pending());
 
 	tick_irq_exit();
 	rcu_irq_exit();
@@ -426,7 +428,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
 	 * schedule the softirq soon.
 	 */
 	if (!in_interrupt())
-		wakeup_softirqd();
+		wakeup_softirqd(local_softirq_pending());
 }
 
 void raise_softirq(unsigned int nr)
@@ -652,13 +654,14 @@ void __init softirq_init(void)
 
 static int ksoftirqd_should_run(unsigned int cpu)
 {
-	return local_softirq_pending();
+	return local_ksoftirqd_overflow();
 }
 
 static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
-	if (local_softirq_pending()) {
+	if (local_ksoftirqd_overflow()) {
+		set_ksoftirqd_overflow(0); /* accept fresh deferrals */
 		/*
 		 * We can safely run softirq on inline stack, as we are not deep
 		 * in the task stack here.
-- 
2.16.3


