[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: a0d1084cb55cce427783635674691faa8e982328 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=a0d1084cb55cce427783635674691faa8e982328 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: bc0058ac420b833229ee5b44fcef986378fd6042 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=bc0058ac420b833229ee5b44fcef986378fd6042 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 37b8773733d916ea6699a394348cb45aaf04cbbe URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=37b8773733d916ea6699a394348cb45aaf04cbbe Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 5be253664e22eea0dc7dd9a947975b06fa92fc20 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=5be253664e22eea0dc7dd9a947975b06fa92fc20 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 641270dc93dda28fed107e80bd05768ae7e9f7af URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=641270dc93dda28fed107e80bd05768ae7e9f7af Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: c90bfe8bb4dea675c1758e9675d731b3920a4d02 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=c90bfe8bb4dea675c1758e9675d731b3920a4d02 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: ba512cfecd3056f3cc4aff34e8f946d252a9ff35 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=ba512cfecd3056f3cc4aff34e8f946d252a9ff35 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 93499718f7c0cc68537bc78db99f0785b1119689 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=93499718f7c0cc68537bc78db99f0785b1119689 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: ea32eae8d98390d8346361610140bbbe1091795e URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=ea32eae8d98390d8346361610140bbbe1091795e Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: b598ba86afea76a9d4873a4bfb982e9219cd973e URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=b598ba86afea76a9d4873a4bfb982e9219cd973e Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 5b003a9ae9aca2a15a55c278435cf0694a83ddd6 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=5b003a9ae9aca2a15a55c278435cf0694a83ddd6 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 3572e5011fd31083cfa75e5cd1f1a3b293f3f201 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=3572e5011fd31083cfa75e5cd1f1a3b293f3f201 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 81dba9933e2bc2c5839647ac6fb7b863b25f8d38 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=81dba9933e2bc2c5839647ac6fb7b863b25f8d38 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 3326a042e81d230a29d187ab2a97517bc19577e3 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=3326a042e81d230a29d187ab2a97517bc19577e3 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 497eb6c45bc47869562b66f855ad7ba3dc5196ba URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=497eb6c45bc47869562b66f855ad7ba3dc5196ba Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov : cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: next Commit: 34d5c0478261bec5c5bdf139943e27dc02ac810a URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=34d5c0478261bec5c5bdf139943e27dc02ac810a Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov: cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: arm64 Commit: eaa1ad25ac9a2b86d8c18c3a2a8bca5c677d1758 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=eaa1ad25ac9a2b86d8c18c3a2a8bca5c677d1758 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(&nklock, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(&nklock, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(&nklock, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(&nklock, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread
[Xenomai-git] Dmitriy Cherkasov: cobalt/arm64: add lazy FPU switching
Module: xenomai-3 Branch: arm64 Commit: dd1e86f3c5c2007c74c448334b21a00d8b40f0c5 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=dd1e86f3c5c2007c74c448334b21a00d8b40f0c5 Author: Dmitriy CherkasovDate: Thu Oct 1 15:47:41 2015 -0700 cobalt/arm64: add lazy FPU switching --- kernel/cobalt/arch/arm64/thread.c | 83 +++-- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c index 316525f6..db369be 100644 --- a/kernel/cobalt/arch/arm64/thread.c +++ b/kernel/cobalt/arch/arm64/thread.c @@ -32,16 +32,31 @@ #include #include #include - +#include #if defined(CONFIG_XENO_ARCH_FPU) -static DEFINE_MUTEX(vfp_check_lock); +static void enable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + orr x1, x1, #(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} +static void disable_fpsimd(void) { + __asm__ __volatile__("mrs x1, cpacr_el1\n\ + and x1, x1, #~(0x3 << 20)\n\ + msr cpacr_el1, x1\n\ + isb" : : : "x1", "memory", "cc"); +} int xnarch_fault_fpu_p(struct ipipe_trap_data *d) { - /* FPU never trapped, this will be a fault */ + /* check if this is an FPU access trap to be handled by Xenomai */ + if(d->exception == IPIPE_TRAP_FPU_ACC){ + return 1; + } + /* FPU already enabled, propagate fault to kernel */ return 0; } @@ -53,6 +68,7 @@ void xnarch_leave_root(struct xnthread *root) { struct xnarchtcb *rootcb = xnthread_archtcb(root); rootcb->fpup = get_fpu_owner(rootcb); + disable_fpsimd(); } void xnarch_save_fpu(struct xnthread *thread) @@ -65,45 +81,67 @@ void xnarch_save_fpu(struct xnthread *thread) void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) { struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; - - /* always switch, no lazy switching */ - struct fpsimd_state *const to_fpup = to->tcb.fpup; - if (from_fpup == to_fpup) - return; + /* +* This only gets called if XNFPU flag is set, or if migrating to Linux. 
+* In both cases, this means turn on FPU and switch. +*/ + enable_fpsimd(); + + if (xnthread_test_state(to, XNROOT) == 0) { + if (from_fpup == to_fpup) + return; - if (from_fpup) - fpsimd_save_state(from_fpup); + if (from_fpup) + fpsimd_save_state(from_fpup); - fpsimd_load_state(to_fpup); + fpsimd_load_state(to_fpup); + } + else { + /* Going to Linux. */ + if (from_fpup) + fpsimd_save_state(from_fpup); - /* always set FPU enabled */ - xnthread_set_state(to, XNFPU); + fpsimd_load_state(to_fpup); + } } int xnarch_handle_fpu_fault(struct xnthread *from, struct xnthread *to, struct ipipe_trap_data *d) { - /* FPU always enabled, faults force exit to Linux */ - return 0; + spl_t s; + + if (xnthread_test_state(to, XNFPU)) + /* FPU is already enabled, probably an exception */ + return 0; + + xnlock_get_irqsave(&nklock, s); + xnthread_set_state(to, XNFPU); + xnlock_put_irqrestore(&nklock, s); + + xnarch_switch_fpu(from, to); + + return 1; + } void xnarch_init_shadow_tcb(struct xnthread *thread) { + spl_t s; struct xnarchtcb *tcb = xnthread_archtcb(thread); tcb->fpup = &(tcb->core.host_task->thread.fpsimd_state); - /* XNFPU is always set, no lazy switching */ - xnthread_set_state(thread, XNFPU); + xnlock_get_irqsave(&nklock, s); + xnthread_clear_state(thread, XNFPU); + xnlock_put_irqrestore(&nklock, s); + } #endif /* CONFIG_XENO_ARCH_FPU */ - /* Switch support functions */ - static void xnarch_tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; @@ -141,8 +179,7 @@ static inline void xnarch_contextidr_thread_switch(struct task_struct *next) { } #endif - -/*/Switch support functions */ +/* End switch support functions */ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) { @@ -173,6 +210,10 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in) xnarch_tls_thread_switch(in_tcb->core.tip->task); xnarch_contextidr_thread_switch(in_tcb->core.tip->task); + /* check if we need to switch FPU on return to Linux */ + if (xnthread_test_state(in, XNROOT) == 1) + 
xnarch_switch_fpu(out, in); + /* * Complete any pending TLB or cache maintenance on this CPU in case * the thread