On 2/26/2022 12:50, Gedare Bloom wrote:
this part looks ok

Thanks. I'm going to hold off on committing this part: the optimization shaved enough time off of task execution that it exposed the bug described in the other patch, which is why the two are bundled.


Kinsey


On Fri, Feb 25, 2022 at 5:51 PM Kinsey Moore <kinsey.mo...@oarcorp.com> wrote:
Add an architecture-specific implementation for
_CPU_Get_current_per_CPU_control() to reduce overhead for getting the
current CPU's Per_CPU_Control structure.
---
  .../cpu/aarch64/include/rtems/score/cpuimpl.h | 23 +++++++++++++++++++
  1 file changed, 23 insertions(+)

diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index 90fd48ad4e..ffdef2f30a 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -125,6 +125,29 @@ typedef struct {
    uint64_t register_fpcr;
  } CPU_Interrupt_frame;

+#ifdef RTEMS_SMP
+
+static inline
+struct Per_CPU_Control *_AARCH64_Get_current_per_CPU_control( void )
+{
+  struct Per_CPU_Control *cpu_self;
+  uint64_t value;
+
+  /* Use the EL1 Thread ID Register (TPIDR_EL1) to locate the per-CPU control */
+  __asm__ volatile (
+    "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory"
+  );
+
+  cpu_self = (struct Per_CPU_Control *)(uintptr_t)value;
+
+  return cpu_self;
+}
+
+#define _CPU_Get_current_per_CPU_control() \
+  _AARCH64_Get_current_per_CPU_control()
+
+#endif /* RTEMS_SMP */
+
  void _CPU_Context_volatile_clobber( uintptr_t pattern );

  void _CPU_Context_validate( uintptr_t pattern );
--
2.30.2
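
For context, the read above only returns something useful if each processor's
TPIDR_EL1 was seeded with the address of its own Per_CPU_Control structure
during SMP bring-up. A minimal sketch of that write side is below; the helper
name _AARCH64_Set_current_per_CPU_control is illustrative and is not part of
the posted patch.

  #include <stdint.h>

  struct Per_CPU_Control;

  /* Hypothetical write-side counterpart: store the address of this
   * processor's Per_CPU_Control in TPIDR_EL1 during SMP bring-up so
   * that _AARCH64_Get_current_per_CPU_control() can read it back
   * later with a single MRS instruction.
   */
  static inline void _AARCH64_Set_current_per_CPU_control(
    struct Per_CPU_Control *cpu_self
  )
  {
    uint64_t value = (uint64_t)(uintptr_t) cpu_self;

    __asm__ volatile (
      "msr TPIDR_EL1, %0" : : "r" ( value ) : "memory"
    );
  }

Keeping the pointer in a dedicated system register avoids indexing a global
per-CPU table by the current processor number on every lookup, which is where
the overhead reduction mentioned in the commit message comes from.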

_______________________________________________
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel