[RFC v2 1/2] ppc/fpu: Add generic FPU API similar to x86
Anson Jacob
Anson.Jacob at amd.com
Wed Jul 21 04:48:00 UTC 2021
- Add kernel_fpu_begin & kernel_fpu_end APIs, mirroring x86
- Add logic similar to x86's to ensure correct pairing of FPU
  begin/end calls
- Add kernel_fpu_enabled() to report whether the kernel FPU is
  enabled (a usage sketch follows after the '---' line below)
v2:
- Added a powerpc variant of asm/fpu/api.h with kernel_fpu_begin/end()
  and kernel_fpu_enabled() declarations
- Exported kernel_fpu_enabled() with EXPORT_SYMBOL_GPL
Signed-off-by: Anson Jacob <Anson.Jacob at amd.com>
CC: Christoph Hellwig <hch at infradead.org>
CC: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
CC: Harry Wentland <harry.wentland at amd.com>
CC: Christian König <christian.koenig at amd.com>
---
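A usage sketch for the new API (scale_coefficients() and its math are
hypothetical, made up for illustration; only kernel_fpu_begin/end()
come from this patch):

#include <asm/fpu/api.h>

/*
 * Bracket any kernel-mode FP math with kernel_fpu_begin()/
 * kernel_fpu_end(). Preemption is disabled in between, so keep
 * the section short.
 */
static void scale_coefficients(double *coeff, int n, double factor)
{
	int i;

	kernel_fpu_begin();
	for (i = 0; i < n; i++)
		coeff[i] *= factor;	/* FP registers may be used here */
	kernel_fpu_end();
}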
arch/powerpc/include/asm/fpu/api.h | 18 ++++
arch/powerpc/include/asm/switch_to.h | 25 +-----
arch/powerpc/kernel/process.c | 130 +++++++++++++++++++++++++++
3 files changed, 151 insertions(+), 22 deletions(-)
create mode 100644 arch/powerpc/include/asm/fpu/api.h
diff --git a/arch/powerpc/include/asm/fpu/api.h b/arch/powerpc/include/asm/fpu/api.h
new file mode 100644
index 000000000000..57308cdc65c9
--- /dev/null
+++ b/arch/powerpc/include/asm/fpu/api.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_FPU_API_H
+#define _ASM_POWERPC_FPU_API_H
+
+/*
+ * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
+ * It disables preemption, so be careful if you intend to use the FPU for
+ * long periods of time.
+ * TODO: If you intend to use the FPU in irq/softirq context, you first need
+ * to check with irq_fpu_usable() whether that is possible.
+ */
+
+extern bool kernel_fpu_enabled(void);
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
+
+#endif /* _ASM_POWERPC_FPU_API_H */
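kernel_fpu_enabled() is intended for sanity checks in code that must
only run inside a begin/end section. A hypothetical debug helper (not
part of this patch) could look like:

/* Warn once if FP code is reached outside kernel_fpu_begin/end(). */
static inline void assert_fpu_enabled(void)
{
	WARN_ON_ONCE(!kernel_fpu_enabled());
}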
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9d1fbd8be1c7..a9a919279f48 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -41,10 +41,7 @@ extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
-static inline void disable_kernel_fp(void)
-{
- msr_check_and_clear(MSR_FP);
-}
+extern void disable_kernel_fp(void);
#else
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
@@ -55,10 +52,7 @@ extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
-static inline void disable_kernel_altivec(void)
-{
- msr_check_and_clear(MSR_VEC);
-}
+extern void disable_kernel_altivec(void);
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
@@ -67,20 +61,7 @@ static inline void __giveup_altivec(struct task_struct *t) { }
#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
-static inline void disable_kernel_vsx(void)
-{
- msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
-}
-#else
-static inline void enable_kernel_vsx(void)
-{
- BUILD_BUG();
-}
-
-static inline void disable_kernel_vsx(void)
-{
- BUILD_BUG();
-}
+extern void disable_kernel_vsx(void);
#endif
#ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 185beb290580..969096c0123c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -75,6 +75,17 @@
#define TM_DEBUG(x...) do { } while(0)
#endif
+/*
+ * Track whether the kernel is currently using the
+ * FPU state.
+ *
+ * This flag is used for:
+ *
+ * - kernel_fpu_begin()/end() correctness checks
+ * - kernel_fpu_enabled() reporting
+ */
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -212,6 +223,9 @@ void enable_kernel_fp(void)
unsigned long cpumsr;
WARN_ON(preemptible());
+ WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, true);
cpumsr = msr_check_and_set(MSR_FP);
@@ -231,6 +245,15 @@ void enable_kernel_fp(void)
}
}
EXPORT_SYMBOL(enable_kernel_fp);
+
+void disable_kernel_fp(void)
+{
+ WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, false);
+ msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(disable_kernel_fp);
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
@@ -263,6 +286,9 @@ void enable_kernel_altivec(void)
unsigned long cpumsr;
WARN_ON(preemptible());
+ WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, true);
cpumsr = msr_check_and_set(MSR_VEC);
@@ -283,6 +309,14 @@ void enable_kernel_altivec(void)
}
EXPORT_SYMBOL(enable_kernel_altivec);
+void disable_kernel_altivec(void)
+{
+ WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, false);
+ msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(disable_kernel_altivec);
/*
* Make sure the VMX/Altivec register state in the
* thread_struct is up to date for task tsk.
@@ -333,6 +367,9 @@ void enable_kernel_vsx(void)
unsigned long cpumsr;
WARN_ON(preemptible());
+ WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, true);
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
@@ -354,6 +391,15 @@ void enable_kernel_vsx(void)
}
EXPORT_SYMBOL(enable_kernel_vsx);
+void disable_kernel_vsx(void)
+{
+ WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, false);
+ msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
+}
+EXPORT_SYMBOL(disable_kernel_vsx);
+
void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
@@ -406,6 +452,90 @@ void flush_spe_to_thread(struct task_struct *tsk)
}
#endif /* CONFIG_SPE */
+static bool fpu_support(void)
+{
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ return true;
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ return true;
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool kernel_fpu_enabled(void)
+{
+ return this_cpu_read(in_kernel_fpu);
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_enabled);
+
+void kernel_fpu_begin(void)
+{
+ if (!fpu_support()) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ preempt_disable();
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ enable_kernel_vsx();
+ return;
+ }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ enable_kernel_altivec();
+ return;
+ }
+#endif
+
+#ifdef CONFIG_PPC_FPU
+ if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ enable_kernel_fp();
+ return;
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+
+void kernel_fpu_end(void)
+{
+ if (!fpu_support()) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ disable_kernel_vsx();
+ goto done;
+ }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ disable_kernel_altivec();
+ goto done;
+ }
+#endif
+
+#ifdef CONFIG_PPC_FPU
+ if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ disable_kernel_fp();
+ goto done;
+ }
+#endif
+
+done:
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
static unsigned long msr_all_available;
static int __init init_msr_all_available(void)
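With the per-cpu in_kernel_fpu flag, unbalanced calls now trigger a
one-time warning instead of going unnoticed, e.g. this (hypothetical)
misuse:

	kernel_fpu_begin();
	kernel_fpu_begin();	/* WARN_ON_ONCE: FP section already active */
	/* ... FP math ... */
	kernel_fpu_end();
	kernel_fpu_end();	/* WARN_ON_ONCE: no FP section active */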
--
2.25.1