Re: [PATCH] aarch64: Use page table level 0

2022-07-20 Thread Sebastian Huber

On 19/07/2022 18:58, Kinsey Moore wrote:

+  if ( begin >= max_mappable || end > max_mappable ) {
+    rtems_fatal_error_occurred( RTEMS_INVALID_ADDRESS );
+  }


Such a fatal error is not really helpful, since you cannot get the error 
location from the fatal source/code pair. I would add a new code for 
bsp_fatal() and use this function. Alternatively, we could add a new 
fatal error source which uses the link register or the source code 
file/line for generic fatal errors.
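
For example, something along these lines after including <bsp/fatal.h> (the
code name AARCH64_FATAL_MMU_ADDRESS_OUT_OF_RANGE is only a placeholder here;
a real name would have to be added to the bsp_fatal_code enumeration):

  if ( begin >= max_mappable || end > max_mappable ) {
    /* Placeholder fatal code, not yet part of bsp_fatal_code */
    bsp_fatal( AARCH64_FATAL_MMU_ADDRESS_OUT_OF_RANGE );
  }

This keeps the BSP as the fatal source, so the code alone identifies which
check failed.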


--
embedded brains GmbH
Mr. Sebastian HUBER
Dornierstr. 4
82178 Puchheim
Germany
email: sebastian.hu...@embedded-brains.de
phone: +49-89-18 94 741 - 16
fax:   +49-89-18 94 741 - 08

Court of registration: Amtsgericht München
Registration number: HRB 157899
Managing directors authorized to represent the company: Peter Rasmussen, Thomas Dörfler
Our privacy policy can be found here:
https://embedded-brains.de/datenschutzerklaerung/

Re: [PATCH] aarch64: Use page table level 0

2022-07-19 Thread Chris Johns
On 20/7/2022 2:58 am, Kinsey Moore wrote:
> This alters the AArch64 page table generation and mapping code and MMU
> configuration to use page table level 0 in addition to levels 1, 2, and
> 3. This allows the mapping of up to 48 bits of memory space and is the
> maximum that can be mapped without relying on additional processor
> extensions. Mappings are restricted based on the number of physical
> address bits that the CPU supports.

OK to push.

I have tested this with 8G of memory mapped to the Versal's DDRCM0_region0_mem
and DDRCM0_region1_mem address spaces.

Thanks
Chris


[PATCH] aarch64: Use page table level 0

2022-07-19 Thread Kinsey Moore
This alters the AArch64 page table generation and mapping code and MMU
configuration to use page table level 0 in addition to levels 1, 2, and
3. This allows the mapping of up to 48 bits of memory space and is the
maximum that can be mapped without relying on additional processor
extensions. Mappings are restricted based on the number of physical
address bits that the CPU supports.
---
 bsps/aarch64/include/bsp/aarch64-mmu.h| 58 +++
 bsps/aarch64/shared/mmu/vmsav8-64.c   |  7 ++-
 .../aarch64/include/libcpu/mmu-vmsav8-64.h|  1 -
 3 files changed, 53 insertions(+), 13 deletions(-)
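
As a quick sanity check of the 48-bit figure, here is a stand-alone snippet;
the two macro values are assumptions restated from the 4KiB-granule
configuration (512 eight-byte entries per table, 4KiB pages) so that it
compiles on its own:

  #define MMU_BITS_PER_LEVEL 9   /* 512 entries per table -> 9 bits per level */
  #define MMU_PAGE_BITS      12  /* 4KiB pages -> 12 offset bits */

  /* Levels 0, 1, 2 and 3 resolve 4 * 9 = 36 bits, plus the page offset. */
  _Static_assert( 4 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS == 48, "48-bit VA" );

  /* TCR_EL1.T0SZ holds 64 minus the VA size, hence the new value 0x10. */
  _Static_assert( 64 - ( 4 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS ) == 0x10, "T0SZ" );

2^48 bytes is 256TiB, which is where the updated "256TB/48 bits mappable"
comment comes from, and IPS( 0x5 ) selects the matching 48-bit intermediate
physical address size.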

diff --git a/bsps/aarch64/include/bsp/aarch64-mmu.h b/bsps/aarch64/include/bsp/aarch64-mmu.h
index 35fc79d73a..c881d5e825 100644
--- a/bsps/aarch64/include/bsp/aarch64-mmu.h
+++ b/bsps/aarch64/include/bsp/aarch64-mmu.h
@@ -48,6 +48,9 @@
 extern "C" {
 #endif /* __cplusplus */
 
+/* AArch64 uses levels 0, 1, 2, and 3 */
+#define MMU_MAX_SUBTABLE_PAGE_BITS ( 3 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
+
 typedef struct {
   uintptr_t begin;
   uintptr_t end;
@@ -216,15 +219,15 @@ aarch64_mmu_get_sub_table(
 
 BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
   uint64_t *page_table,
-  uintptr_t root_address,
-  uintptr_t addr,
+  uint64_t root_address,
+  uint64_t addr,
   uint64_t size,
-  uint32_t level,
+  int8_t level,
   uint64_t flags
 )
 {
   uint32_t shift = ( 2 - level ) * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS;
-  uintptr_t granularity = 1 << shift;
+  uint64_t granularity = 1LLU << shift;
   uint64_t page_flag = 0;
 
   if ( level == 2 ) {
@@ -233,7 +236,7 @@ BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
 
   while ( size > 0 ) {
     uintptr_t index = aarch64_mmu_get_index( root_address, addr, shift );
-    uintptr_t block_bottom = RTEMS_ALIGN_DOWN( addr, granularity );
+    uint64_t block_bottom = RTEMS_ALIGN_DOWN( addr, granularity );
     uint64_t chunk_size = granularity;
 
     /* check for perfect block match */
@@ -270,7 +273,7 @@ BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
     }
 
     /* Deal with any subtable modification  */
-    uintptr_t new_root_address = root_address + index * granularity;
+    uint64_t new_root_address = root_address + index * granularity;
     uint64_t *sub_table = NULL;
     rtems_status_code sc;
 
@@ -311,6 +314,33 @@ BSP_START_DATA_SECTION extern const aarch64_mmu_config_entry
 BSP_START_DATA_SECTION extern const size_t
   aarch64_mmu_config_table_size;
 
+/* Get the maximum number of bits supported by this hardware */
+BSP_START_TEXT_SECTION static inline uint64_t
+aarch64_mmu_get_cpu_pa_bits( void )
+{
+  uint64_t id_reg = _AArch64_Read_id_aa64mmfr0_el1();
+
+  switch ( AARCH64_ID_AA64MMFR0_EL1_PARANGE_GET( id_reg ) ) {
+  case 0:
+    return 32;
+  case 1:
+    return 36;
+  case 2:
+    return 40;
+  case 3:
+    return 42;
+  case 4:
+    return 44;
+  case 5:
+    return 48;
+  case 6:
+    return 52;
+  default:
+    return 48;
+  }
+  return 48;
+}
+
 BSP_START_TEXT_SECTION static inline void
 aarch64_mmu_set_translation_table_entries(
   uint64_t *ttb,
@@ -320,14 +350,19 @@ aarch64_mmu_set_translation_table_entries(
   /* Force alignment to 4k page size */
   uintptr_t begin = RTEMS_ALIGN_DOWN( config->begin, MMU_PAGE_SIZE );
   uintptr_t end = RTEMS_ALIGN_UP( config->end, MMU_PAGE_SIZE );
+  uint64_t max_mappable = 1LLU << aarch64_mmu_get_cpu_pa_bits();
   rtems_status_code sc;
 
+  if ( begin >= max_mappable || end > max_mappable ) {
+    rtems_fatal_error_occurred( RTEMS_INVALID_ADDRESS );
+  }
+
   sc = aarch64_mmu_map_block(
     ttb,
     0x0,
     begin,
     end - begin,
-    0,
+    -1,
     config->flags
   );
 
@@ -347,7 +382,7 @@ BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup_translation_table(
   aarch64_mmu_page_table_set_blocks(
     ttb,
     (uintptr_t) NULL,
-    MMU_TOP_LEVEL_PAGE_BITS,
+    MMU_MAX_SUBTABLE_PAGE_BITS,
     0
   );
 
@@ -390,10 +425,11 @@ aarch64_mmu_disable( void )
 BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup( void )
 {
   /* Set TCR */
-  /* 128GB/36 bits mappable (64-0x1c) */
+  /* 256TB/48 bits mappable (64-0x10) */
   _AArch64_Write_tcr_el1(
-    AARCH64_TCR_EL1_T0SZ( 0x1c ) | AARCH64_TCR_EL1_IRGN0( 0x1 ) |
-    AARCH64_TCR_EL1_ORGN0( 0x1 ) | AARCH64_TCR_EL1_SH0( 0x3 ) | AARCH64_TCR_EL1_TG0( 0x0 )
+    AARCH64_TCR_EL1_T0SZ( 0x10 ) | AARCH64_TCR_EL1_IRGN0( 0x1 ) |
+    AARCH64_TCR_EL1_ORGN0( 0x1 ) | AARCH64_TCR_EL1_SH0( 0x3 ) |
+    AARCH64_TCR_EL1_TG0( 0x0 ) | AARCH64_TCR_EL1_IPS( 0x5ULL )
   );
 
   /* Set MAIR */
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64.c b/bsps/aarch64/shared/mmu/vmsav8-64.c
index 9caa91c414..190a05f7d5 100644
--- a/bsps/aarch64/shared/mmu/vmsav8-64.c
+++ b/bsps/aarch64/shared/mmu/vmsav8-64.c
@@ -47,6 +47,11 @@ rtems_status_code aarch64_mmu_map(
 )
 {
   rtems_status_code sc;
+  uint64_t max_mappable = 1LLU <<