[PATCH glibc] Fix build for hurd/thread-self.c for i386.

2023-05-21 Thread Flavio Cruz
We need to include hurd.h for libc_hidden_proto (__hurd_thread_self),
introduced in 
https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=b44c1e12524bb5de0f93294a7c24c8e41c06bb75

This is the error log:

In file included from :
./../include/libc-symbols.h:472:33: error: '__EI___hurd_thread_self' aliased to 
undefined symbol '__GI___hurd_thread_self'
  472 |   extern thread __typeof (name) __EI_##name \
  | ^
./../include/libc-symbols.h:468:3: note: in expansion of macro '__hidden_ver2'
  468 |   __hidden_ver2 (, local, internal, name)
  |   ^
./../include/libc-symbols.h:476:41: note: in expansion of macro '__hidden_ver1'
  476 | #  define hidden_def(name)  __hidden_ver1(__GI_##name, 
name, name);
  | ^
./../include/libc-symbols.h:557:32: note: in expansion of macro 'hidden_def'
  557 | # define libc_hidden_def(name) hidden_def (name)
  |^~
thread-self.c:27:1: note: in expansion of macro 'libc_hidden_def'
   27 | libc_hidden_def (__hurd_thread_self)
  | ^~~
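
For reference, the mechanism behind the error is easy to reproduce outside glibc.  A minimal standalone sketch (these are not the real glibc macros; foo and exported_foo are made-up names):

/* sketch.c -- "gcc -c sketch.c -DHAVE_PROTO" builds; without -DHAVE_PROTO
   it fails with the same "aliased to undefined symbol" diagnostic.  */

#ifdef HAVE_PROTO
/* Roughly what including hurd.h does via libc_hidden_proto: it makes the
   definition below be emitted under its internal __GI_ name.  */
extern int foo (void) __asm__ ("__GI_foo");
#endif

int
foo (void)
{
  return 42;
}

/* Roughly what libc_hidden_def expands to: an alias to the internal name.
   If the prototype above is missing, __GI_foo is never defined in this
   translation unit and GCC rejects the alias.  */
extern __typeof (foo) exported_foo __attribute__ ((alias ("__GI_foo")));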
---
 hurd/thread-self.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hurd/thread-self.c b/hurd/thread-self.c
index af013503bf..494a127aa5 100644
--- a/hurd/thread-self.c
+++ b/hurd/thread-self.c
@@ -16,6 +16,7 @@
License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
 
+#include <hurd.h>
 #include 
 
 thread_t
-- 
2.39.2




Re: [PATCH] x86_64: fix APIC initialization

2023-05-21 Thread Samuel Thibault
Applied, thanks!

Luca Dariz, on Sun 21 May 2023 22:49:18 +0200, wrote:
> * i386/i386at/acpi_parse_apic.c: use vm_offset_t instead of uint32_t
>   for vm addresses
> * x86_64/Makefrag.am: support --enable-apic
> ---
>  i386/i386at/acpi_parse_apic.c |  8 ++++----
>  x86_64/Makefrag.am            | 17 ++++++++++++++---
>  2 files changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/i386/i386at/acpi_parse_apic.c b/i386/i386at/acpi_parse_apic.c
> index 2680d0aa..27e3410d 100644
> --- a/i386/i386at/acpi_parse_apic.c
> +++ b/i386/i386at/acpi_parse_apic.c
> @@ -370,20 +370,20 @@ static int
>  acpi_apic_parse_table(struct acpi_apic *apic)
>  {
>  struct acpi_apic_dhdr *apic_entry = NULL;
> -uint32_t end = 0;
> +vm_offset_t end = 0;
>  uint8_t numcpus = 1;
>  
>  /* Get the address of first APIC entry */
>  apic_entry = (struct acpi_apic_dhdr*) apic->entry;
>  
>  /* Get the end address of APIC table */
> -end = (uint32_t) apic + apic->header.length;
> +end = (vm_offset_t) apic + apic->header.length;
>  
>  /* Initialize number of cpus */
>  numcpus = apic_get_numcpus();
>  
>  /* Search in APIC entry. */
> -while ((uint32_t)apic_entry < end) {
> +while ((vm_offset_t)apic_entry < end) {
>  struct acpi_apic_lapic *lapic_entry;
>  struct acpi_apic_ioapic *ioapic_entry;
>  struct acpi_apic_irq_override *irq_override_entry;
> @@ -421,7 +421,7 @@ acpi_apic_parse_table(struct acpi_apic *apic)
>  }
>  
>  /* Get next APIC entry. */
> -apic_entry = (struct acpi_apic_dhdr*)((uint32_t) apic_entry
> +apic_entry = (struct acpi_apic_dhdr*)((vm_offset_t) apic_entry
>+ apic_entry->length);
>  
>  /* Update number of cpus. */
> diff --git a/x86_64/Makefrag.am b/x86_64/Makefrag.am
> index 9bad6b7a..9f636a08 100644
> --- a/x86_64/Makefrag.am
> +++ b/x86_64/Makefrag.am
> @@ -28,6 +28,8 @@ if HOST_x86_64
>  #
>  
>  libkernel_a_SOURCES += \
> + i386/i386at/acpi_parse_apic.h \
> + i386/i386at/acpi_parse_apic.c \
>   i386/i386at/autoconf.c \
>   i386/i386at/autoconf.h \
>   i386/i386at/biosmem.c \
> @@ -65,7 +67,6 @@ libkernel_a_SOURCES += \
>   i386/i386at/kdsoft.h \
>   i386/i386at/mem.c \
>   i386/i386at/mem.h \
> - i386/i386at/pic_isa.c \
>   i386/i386at/rtc.c \
>   i386/i386at/rtc.h
>  endif
> @@ -95,15 +96,25 @@ libkernel_a_SOURCES += \
>  
>  if PLATFORM_at
>  libkernel_a_SOURCES += \
> + i386/i386/apic.h \
> + i386/i386/apic.c \
>   i386/i386/hardclock.c \
>   i386/i386/hardclock.h \
>   i386/i386/irq.c \
>   i386/i386/irq.h \
>   i386/i386/msr.h \
> - i386/i386/pic.c \
> - i386/i386/pic.h \
>   i386/i386/pit.c \
>   i386/i386/pit.h
> +
> +if enable_apic
> +libkernel_a_SOURCES += \
> + i386/i386at/ioapic.c
> +else
> +libkernel_a_SOURCES += \
> + i386/i386/pic.c \
> + i386/i386/pic.h \
> + i386/i386at/pic_isa.c
> +endif
>  endif
>  
>  #
> -- 
> 2.30.2
> 
> 

-- 
Samuel
---
For an independent, transparent and rigorous evaluation!
I support Inria's Commission d'Évaluation.



Re: [PATCH v2] x86_64: push user's VM_MAX_ADDRESS

2023-05-21 Thread Samuel Thibault
Applied, thanks!

Luca Dariz, on Sun 21 May 2023 22:45:24 +0200, wrote:
> * i386/include/mach/i386/vm_param.h: check for both KERNEL and USER32
>   to differentiate between user/kernel on x86_64, and push the upper
>   limit of user address space to 128 TB.
> ---
>  i386/include/mach/i386/vm_param.h | 24 ++++++++++++++++--------
>  1 file changed, 16 insertions(+), 8 deletions(-)
> 
> diff --git a/i386/include/mach/i386/vm_param.h 
> b/i386/include/mach/i386/vm_param.h
> index f09049a5..1576f048 100644
> --- a/i386/include/mach/i386/vm_param.h
> +++ b/i386/include/mach/i386/vm_param.h
> @@ -65,18 +65,26 @@
>   ~(I386_PGBYTES-1))
>  #define i386_trunc_page(x)   (((unsigned long)(x)) & ~(I386_PGBYTES-1))
>  
> -/* User address spaces are 3GB each,
> -   starting at virtual and linear address 0.
> +/* User address spaces are 3GB each on a 32-bit kernel, starting at
> +   virtual and linear address 0.
> +   On a 64-bit kernel we split the address space in half, with the
> +   lower 128TB for the user address space and the upper 128TB for the
> +   kernel address space.
>  
> -   VM_MAX_ADDRESS can be reduced to leave more space for the kernel, but must
> -   not be increased to more than 3GB as glibc and hurd servers would not cope
> -   with that.
> +   On a 32-bit kernel VM_MAX_ADDRESS can be reduced to leave more
> +   space for the kernel, but must not be increased to more than 3GB as
> +   glibc and hurd servers would not cope with that.
> */
>  #define VM_MIN_ADDRESS   (0ULL)
> +
>  #ifdef __x86_64__
> -#define VM_MAX_ADDRESS   (0xc0000000ULL)
> -#else
> +#if defined(KERNEL) && defined(USER32)
> +#define VM_MAX_ADDRESS   (0xc0000000UL)
> +#else /* defined(KERNEL) && defined(USER32) */
> +#define VM_MAX_ADDRESS   (0x800000000000ULL)
> +#endif /* defined(KERNEL) && defined(USER32) */
> +#else /* __x86_64__ */
>  #define VM_MAX_ADDRESS   (0xc0000000UL)
> -#endif
> +#endif /* __x86_64__ */
>  
>  #endif   /* _MACH_I386_VM_PARAM_H_ */
> -- 
> 2.30.2



[PATCH] x86_64: fix APIC initialization

2023-05-21 Thread Luca Dariz
* i386/i386at/acpi_parse_apic.c: use vm_offset_t instead of uint32_t
  for vm addresses
* x86_64/Makefrag.am: support --enable-apic
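
For context, here is a standalone sketch (illustration only, not gnumach code; vm_offset_t is re-declared locally) of the truncation that motivates the vm_offset_t change above: on x86_64 a pointer is 64-bit, so casting it through uint32_t silently drops the upper half of the address.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vm_offset_t;	/* pointer-sized, which is the property that matters */

int
main (void)
{
  int object;
  void *p = &object;				/* may live above 4 GiB on x86_64 */

  uint32_t truncated = (uint32_t) (uintptr_t) p;	/* drops bits 32..63 */
  vm_offset_t full = (vm_offset_t) p;			/* keeps the whole address */

  printf ("full=%#lx truncated=%#x\n", (unsigned long) full, truncated);
  return 0;
}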
---
 i386/i386at/acpi_parse_apic.c |  8 ++++----
 x86_64/Makefrag.am            | 17 ++++++++++++++---
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/i386/i386at/acpi_parse_apic.c b/i386/i386at/acpi_parse_apic.c
index 2680d0aa..27e3410d 100644
--- a/i386/i386at/acpi_parse_apic.c
+++ b/i386/i386at/acpi_parse_apic.c
@@ -370,20 +370,20 @@ static int
 acpi_apic_parse_table(struct acpi_apic *apic)
 {
 struct acpi_apic_dhdr *apic_entry = NULL;
-uint32_t end = 0;
+vm_offset_t end = 0;
 uint8_t numcpus = 1;
 
 /* Get the address of first APIC entry */
 apic_entry = (struct acpi_apic_dhdr*) apic->entry;
 
 /* Get the end address of APIC table */
-end = (uint32_t) apic + apic->header.length;
+end = (vm_offset_t) apic + apic->header.length;
 
 /* Initialize number of cpus */
 numcpus = apic_get_numcpus();
 
 /* Search in APIC entry. */
-while ((uint32_t)apic_entry < end) {
+while ((vm_offset_t)apic_entry < end) {
 struct acpi_apic_lapic *lapic_entry;
 struct acpi_apic_ioapic *ioapic_entry;
 struct acpi_apic_irq_override *irq_override_entry;
@@ -421,7 +421,7 @@ acpi_apic_parse_table(struct acpi_apic *apic)
 }
 
 /* Get next APIC entry. */
-apic_entry = (struct acpi_apic_dhdr*)((uint32_t) apic_entry
+apic_entry = (struct acpi_apic_dhdr*)((vm_offset_t) apic_entry
   + apic_entry->length);
 
 /* Update number of cpus. */
diff --git a/x86_64/Makefrag.am b/x86_64/Makefrag.am
index 9bad6b7a..9f636a08 100644
--- a/x86_64/Makefrag.am
+++ b/x86_64/Makefrag.am
@@ -28,6 +28,8 @@ if HOST_x86_64
 #
 
 libkernel_a_SOURCES += \
+   i386/i386at/acpi_parse_apic.h \
+   i386/i386at/acpi_parse_apic.c \
i386/i386at/autoconf.c \
i386/i386at/autoconf.h \
i386/i386at/biosmem.c \
@@ -65,7 +67,6 @@ libkernel_a_SOURCES += \
i386/i386at/kdsoft.h \
i386/i386at/mem.c \
i386/i386at/mem.h \
-   i386/i386at/pic_isa.c \
i386/i386at/rtc.c \
i386/i386at/rtc.h
 endif
@@ -95,15 +96,25 @@ libkernel_a_SOURCES += \
 
 if PLATFORM_at
 libkernel_a_SOURCES += \
+   i386/i386/apic.h \
+   i386/i386/apic.c \
i386/i386/hardclock.c \
i386/i386/hardclock.h \
i386/i386/irq.c \
i386/i386/irq.h \
i386/i386/msr.h \
-   i386/i386/pic.c \
-   i386/i386/pic.h \
i386/i386/pit.c \
i386/i386/pit.h
+
+if enable_apic
+libkernel_a_SOURCES += \
+   i386/i386at/ioapic.c
+else
+libkernel_a_SOURCES += \
+   i386/i386/pic.c \
+   i386/i386/pic.h \
+   i386/i386at/pic_isa.c
+endif
 endif
 
 #
-- 
2.30.2




[PATCH v2] x86_64: push user's VM_MAX_ADDRESS

2023-05-21 Thread Luca Dariz
* i386/include/mach/i386/vm_param.h: check for both KERNEL and USER32
  to differentiate between user/kernel on x86_64, and push the upper
  limit of user address space to 128 TB.
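
A quick compile-time sanity check of the constants (standalone sketch, not kernel code):

#include <assert.h>

/* 0x800000000000 is 2^47 bytes, i.e. 128 TiB, the lower canonical half of
   the 48-bit x86_64 address space; 0xc0000000 is the 3 GiB limit kept for
   the 32-bit/USER32 case.  */
static_assert (0x800000000000ULL == (1ULL << 47), "user half is 2^47 bytes");
static_assert (((1ULL << 47) >> 40) == 128, "2^47 bytes is 128 TiB");
static_assert (0xc0000000UL == (3UL << 30), "USER32 limit is 3 GiB");

int main (void) { return 0; }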
---
 i386/include/mach/i386/vm_param.h | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/i386/include/mach/i386/vm_param.h 
b/i386/include/mach/i386/vm_param.h
index f09049a5..1576f048 100644
--- a/i386/include/mach/i386/vm_param.h
+++ b/i386/include/mach/i386/vm_param.h
@@ -65,18 +65,26 @@
~(I386_PGBYTES-1))
 #define i386_trunc_page(x) (((unsigned long)(x)) & ~(I386_PGBYTES-1))
 
-/* User address spaces are 3GB each,
-   starting at virtual and linear address 0.
+/* User address spaces are 3GB each on a 32-bit kernel, starting at
+   virtual and linear address 0.
+   On a 64-bit kernel we split the address space in half, with the
+   lower 128TB for the user address space and the upper 128TB for the
+   kernel address space.
 
-   VM_MAX_ADDRESS can be reduced to leave more space for the kernel, but must
-   not be increased to more than 3GB as glibc and hurd servers would not cope
-   with that.
+   On a 32-bit kernel VM_MAX_ADDRESS can be reduced to leave more
+   space for the kernel, but must not be increased to more than 3GB as
+   glibc and hurd servers would not cope with that.
*/
 #define VM_MIN_ADDRESS (0ULL)
+
 #ifdef __x86_64__
-#define VM_MAX_ADDRESS (0xc0000000ULL)
-#else
+#if defined(KERNEL) && defined(USER32)
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#else /* defined(KERNEL) && defined(USER32) */
+#define VM_MAX_ADDRESS (0x800000000000ULL)
+#endif /* defined(KERNEL) && defined(USER32) */
+#else /* __x86_64__ */
 #define VM_MAX_ADDRESS (0xc0000000UL)
-#endif
+#endif /* __x86_64__ */
 
 #endif /* _MACH_I386_VM_PARAM_H_ */
-- 
2.30.2




Re: [PATCH 2/3] x86_64: push user's VM_MAX_ADDRESS

2023-05-21 Thread Luca Dariz

On 21/05/23 21:17, Samuel Thibault wrote:

Luca Dariz, on Sun 21 May 2023 10:57:57 +0200, wrote:

+#if defined(KERNEL) && defined(USER32)
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#else /* defined(KERNEL) && defined(USER32) */
+#define VM_MAX_ADDRESS (0x7fffffffffffULL)


Why not 0x800000000000ULL)?


Ah, right, the upper limit is not included.


Luca
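
The exchange boils down to VM_MAX_ADDRESS being an exclusive upper bound: the last mappable user byte is VM_MAX_ADDRESS - 1, hence 0x800000000000 rather than 0x7fffffffffff.  A small sketch of the check this implies (illustration only, not kernel code):

#define DEMO_VM_MIN_ADDRESS 0x0ULL
#define DEMO_VM_MAX_ADDRESS 0x800000000000ULL	/* 2^47, exclusive */

/* A user address lies in the half-open range [MIN, MAX).  */
static inline int
demo_addr_is_user (unsigned long long addr)
{
  return addr >= DEMO_VM_MIN_ADDRESS && addr < DEMO_VM_MAX_ADDRESS;
}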




Re: [PATCH 2/2] Remove an unused include

2023-05-21 Thread Samuel Thibault
Applied, thanks!

Sergey Bugaev, on Thu 18 May 2023 19:57:19 +0300, wrote:
> ---
>  utils/ps.c | 1 -
>  1 file changed, 1 deletion(-)
> 
> diff --git a/utils/ps.c b/utils/ps.c
> index a852b92f..6a08f736 100644
> --- a/utils/ps.c
> +++ b/utils/ps.c
> @@ -21,7 +21,6 @@
>  #include 
>  #include 
>  #include 
> -#include 
>  #include 
>  #include 
>  #include 
> -- 
> 2.40.1
> 
> 

-- 
Samuel
---
For an independent, transparent and rigorous evaluation!
I support Inria's Commission d'Évaluation.



Re: [PATCH 1/2] streamio: Implement trivfs_append_args ()

2023-05-21 Thread Samuel Thibault
Applied, thanks!

Sergey Bugaev, on Thu 18 May 2023 19:57:18 +0300, wrote:
> This enables me to run 'fsysopts /dev/mach-console' and get:
> 
> /hurd/streamio --writable console
> ---
>  trans/streamio.c | 39 +++
>  1 file changed, 39 insertions(+)
> 
> diff --git a/trans/streamio.c b/trans/streamio.c
> index 2d40a5d3..272a002c 100644
> --- a/trans/streamio.c
> +++ b/trans/streamio.c
> @@ -23,7 +23,9 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
> +#include 
>  
>  #include 
>  #include 
> @@ -722,6 +724,43 @@ trivfs_S_file_syncfs (struct trivfs_protid *cred,
>return err;
>  }
>  
> +error_t
> +trivfs_append_args (struct trivfs_control *fsys,
> + char **argz, size_t *argz_len)
> +{
> +  error_t err;
> +
> +  switch (trivfs_allow_open & O_RDWR)
> +{
> +default:
> +  assert_backtrace (!"Bad trivfs_allow_open");
> +case O_READ:
> +  err = argz_add (argz, argz_len, "--readonly");
> +  break;
> +case O_WRITE:
> +  err = argz_add (argz, argz_len, "--writeonly");
> +  break;
> +case O_RDWR:
> +  err = argz_add (argz, argz_len, "--writable");
> +  break;
> +}
> +
> +  if (err)
> +return err;
> +
> +  if (rdev != (dev_t) 0)
> +{
> +  char buf[40];
> +  snprintf (buf, sizeof (buf), "--rdev=%d,%d",
> + gnu_dev_major (rdev), gnu_dev_minor (rdev));
> +  err = argz_add (argz, argz_len, buf);
> +  if (err)
> +return err;
> +}
> +
> +  return argz_add (argz, argz_len, stream_name);
> +}
> +
>  
>  /* This flag is set if there is an outstanding device_write.  */
>  static int output_pending;
> -- 
> 2.40.1
> 
> 

-- 
Samuel
---
For an independent, transparent and rigorous evaluation!
I support Inria's Commission d'Évaluation.
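
As background for the trivfs_append_args () change above, here is a small standalone program using the same glibc argz calls; the option strings mirror the patch, everything else is illustrative only.

#include <argz.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  char *argz = NULL;
  size_t argz_len = 0;

  /* Accumulate options the way trivfs_append_args () does.  */
  if (argz_add (&argz, &argz_len, "--writable")
      || argz_add (&argz, &argz_len, "console"))
    {
      fprintf (stderr, "argz_add failed\n");
      return 1;
    }

  /* Join the NUL-separated vector with spaces, roughly what fsysopts
     ends up printing as "/hurd/streamio --writable console".  */
  argz_stringify (argz, argz_len, ' ');
  printf ("/hurd/streamio %s\n", argz);

  free (argz);
  return 0;
}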



Re: [PATCH 1/3] pmap: dynamically allocate the whole user page tree map

2023-05-21 Thread Samuel Thibault
Applied, thanks!!


Luca Dariz, on Sun 21 May 2023 10:57:56 +0200, wrote:
> * i386/intel/pmap.c: switch to dynamic allocation of all the page tree
>   map levels for the user-space address range, using a separate kmem
>   cache for each level. This allows to extend the usable memory space
>   on x86_64 to use more than one L3 page for user space. The kernel
>   address map is left untouched for now as it needs a different
>   initialization.
> * i386/intel/pmap.h: remove hardcoded user pages and add macro to
>   reconstruct the page-to-virtual mapping
> ---
>  i386/intel/pmap.c | 544 ++
>  i386/intel/pmap.h |  21 +-
>  2 files changed, 277 insertions(+), 288 deletions(-)
> 
> diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
> index e867ed59..3a30271e 100644
> --- a/i386/intel/pmap.c
> +++ b/i386/intel/pmap.c
> @@ -398,6 +398,7 @@ struct pmap   kernel_pmap_store;
>  pmap_t   kernel_pmap;
>  
>  struct kmem_cache pmap_cache;  /* cache of pmap structures */
> +struct kmem_cache pt_cache;/* cache of page tables */
>  struct kmem_cache pd_cache;/* cache of page directories */
>  #if PAE
>  struct kmem_cache pdpt_cache;  /* cache of page directory pointer tables */
> @@ -429,6 +430,14 @@ pt_entry_t *kernel_page_dir;
>   */
>  static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS * NCPUS];
>  
> +#ifdef __x86_64__
> +static inline pt_entry_t *
> +pmap_l4base(const pmap_t pmap, vm_offset_t lin_addr)
> +{
> + return &pmap->l4base[lin2l4num(lin_addr)];
> +}
> +#endif
> +
>  #ifdef PAE
>  static inline pt_entry_t *
>  pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
> @@ -443,7 +452,7 @@ pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
>  #else /* __x86_64__ */
>   pdp_table = pmap->pdpbase;
>  #endif /* __x86_64__ */
> - return pdp_table;
> + return &pdp_table[lin2pdpnum(lin_addr)];
>  }
>  #endif
>  
> @@ -456,7 +465,9 @@ pmap_pde(const pmap_t pmap, vm_offset_t addr)
>  #if PAE
>   pt_entry_t *pdp_table;
>   pdp_table = pmap_ptp(pmap, addr);
> - pt_entry_t pde = pdp_table[lin2pdpnum(addr)];
> +if (pdp_table == 0)
> + return(PT_ENTRY_NULL);
> + pt_entry_t pde = *pdp_table;
>   if ((pde & INTEL_PTE_VALID) == 0)
>   return PT_ENTRY_NULL;
>   page_dir = (pt_entry_t *) ptetokv(pde);
> @@ -1092,15 +1103,18 @@ void pmap_init(void)
>*/
>   s = (vm_size_t) sizeof(struct pmap);
>   kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
> - kmem_cache_init(&pd_cache, "pd",
> + kmem_cache_init(&pt_cache, "pmap_L1",
> + INTEL_PGBYTES, INTEL_PGBYTES, NULL,
> + KMEM_CACHE_PHYSMEM);
> + kmem_cache_init(&pd_cache, "pmap_L2",
>   INTEL_PGBYTES, INTEL_PGBYTES, NULL,
>   KMEM_CACHE_PHYSMEM);
>  #if PAE
> - kmem_cache_init(&pdpt_cache, "pdpt",
> + kmem_cache_init(&pdpt_cache, "pmap_L3",
>   INTEL_PGBYTES, INTEL_PGBYTES, NULL,
>   KMEM_CACHE_PHYSMEM);
>  #ifdef __x86_64__
> - kmem_cache_init(&l4_cache, "L4",
> + kmem_cache_init(&l4_cache, "pmap_L4",
>   INTEL_PGBYTES, INTEL_PGBYTES, NULL,
>   KMEM_CACHE_PHYSMEM);
>  #endif /* __x86_64__ */
> @@ -1244,6 +1258,11 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
>   vm_object_lock(pmap_object);
>   m = vm_page_lookup(pmap_object, pa);
>   vm_page_lock_queues();
> +#ifdef   MACH_PV_PAGETABLES
> +if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
> +panic("couldn't unpin page %llx(%lx)\n", pa, (vm_offset_t) 
> kv_to_ma(pa));
> +pmap_set_page_readwrite((void*) phystokv(pa));
> +#endif   /* MACH_PV_PAGETABLES */
>   vm_page_free(m);
>   inuse_ptepages_count--;
>   vm_page_unlock_queues();
> @@ -1265,7 +1284,7 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
>  pmap_t pmap_create(vm_size_t size)
>  {
>  #ifdef __x86_64__
> - // needs to be reworked if we want to dynamically allocate PDPs
> + // needs to be reworked if we want to dynamically allocate PDPs for 
> kernel
>   const int PDPNUM = PDPNUM_KERNEL;
>  #endif
>   pt_entry_t  *page_dir[PDPNUM];
> @@ -1360,30 +1379,6 @@ pmap_t pmap_create(vm_size_t size)
>   memset(p->l4base, 0, INTEL_PGBYTES);
>   WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
> pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | 
> INTEL_PTE_VALID | INTEL_PTE_WRITE);
> -#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS)
> - // kernel vm and user vm are not in the same l4 entry, so add the user 
> one
> -// TODO alloc only PDPTE for the user range VM_MIN_USER_ADDRESS, 
> VM_MAX_USER_ADDRESS
> - // and keep the same for kernel range, in l4 table we have different 
> entries
> - pt_entry_t *pdp_user = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
> - if (pdp_user == NULL) {
> - panic("pmap create");
> 

Re: [PATCH 2/3] x86_64: push user's VM_MAX_ADDRESS

2023-05-21 Thread Samuel Thibault
Luca Dariz, on Sun 21 May 2023 10:57:57 +0200, wrote:
> +#if defined(KERNEL) && defined(USER32)
> +#define VM_MAX_ADDRESS   (0xc0000000UL)
> +#else /* defined(KERNEL) && defined(USER32) */
> +#define VM_MAX_ADDRESS   (0x7fffffffffffULL)

Why not 0x800000000000ULL)?

Samuel



Re: [PATCH 3/3] x86_64: fix descriptor loading for 64-bit addresses

2023-05-21 Thread Samuel Thibault
Applied, thanks!

Luca Dariz, on Sun 21 May 2023 10:57:58 +0200, wrote:
> * i386/i386/seg.h: use proper type for segment addresses. This is not
>   impacting any functionality on 64-bit, as segments limits are
>   ignored, but at least we silence a warning.
> ---
>  i386/i386/seg.h | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/i386/i386/seg.h b/i386/i386/seg.h
> index 71c05e49..673d1d9f 100644
> --- a/i386/i386/seg.h
> +++ b/i386/i386/seg.h
> @@ -32,6 +32,7 @@
>  #define  _I386_SEG_H_
>  
>  #include 
> +#include 
>  
>  /*
>   * i386 segmentation.
> @@ -181,7 +182,7 @@ static inline void lldt(unsigned short ldt_selector)
>  
>  /* Fill a segment descriptor.  */
>  static inline void
> -fill_descriptor(struct real_descriptor *_desc, unsigned base, unsigned limit,
> +fill_descriptor(struct real_descriptor *_desc, vm_offset_t base, vm_offset_t 
> limit,
>   unsigned char access, unsigned char sizebits)
>  {
>   /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't 
> simpler actually */
> -- 
> 2.30.2
> 
> 

-- 
Samuel
---
For an independent, transparent and rigorous evaluation!
I support Inria's Commission d'Évaluation.



Re: Some progress, Guix rumpdisk still crashes...

2023-05-21 Thread Janneke Nieuwenhuizen
Svante Signell writes:

Hi!

> On Wed, 2023-05-17 at 20:24 +0200, Janneke Nieuwenhuizen wrote:

>> rumpdisk still crashes, but the good news (I guess) is that it seems to
[..]

> I use for hurdX (hurd-cross):
> qemu-system-x86_64 -chardev stdio,id=char0,logfile=serial.log,signal=off 
> -serial
> chardev:char0 -m 2048 -enable-kvm -drive file=hurd-cross-serial.img
> And added to /boot/grub/grub.cfg:
> set serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1
> set terminal_input serial
> set terminal_output serial
> set timeout=5

Thanks, but rumpdisk now boots, see

https://lists.gnu.org/archive/html/bug-hurd/2023-05//msg00331.html

(it's not being used yet, we're working on that ;-)

Greetings,
Janneke

-- 
Janneke Nieuwenhuizen   | GNU LilyPond https://LilyPond.org
Freelance IT https://www.JoyOfSource.com | Avatar® https://AvatarAcademy.com



Re: Some progress, Guix rumpdisk still crashes...

2023-05-21 Thread Svante Signell
On Wed, 2023-05-17 at 20:24 +0200, Janneke Nieuwenhuizen wrote:
> Hi!
> 
> With this newly patched glibc
> 
>     https://gitlab.com/janneke/guix/-/tree/wip-hurd12
> 
> rumpdisk still crashes, but the good news (I guess) is that it seems to
> get somewhat further, or at least it crashes differently.  Here are the
> last 24 (WTF, 1980 wants their screensize back!?) lines (I don't know
> how to get the full log from QEMU):

I use for hurdX (hurd-cross):
qemu-system-x86_64 -chardev stdio,id=char0,logfile=serial.log,signal=off -serial
chardev:char0 -m 2048 -enable-kvm -drive file=hurd-cross-serial.img
And added to /boot/grub/grub.cfg:
set serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1
set terminal_input serial
set terminal_output serial
set timeout=5




[PATCH 3/3] x86_64: fix descriptor loading for 64-bit addresses

2023-05-21 Thread Luca Dariz
* i386/i386/seg.h: use proper type for segment addresses. This is not
  impacting any functionality on 64-bit, as segments limits are
  ignored, but at least we silence a warning.
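
For background, a classic code/data descriptor only stores 32 bits of base split across three fields, and the limit is ignored in long mode, so the wider parameter type mainly avoids truncation warnings at call sites passing 64-bit vm_offset_t values.  A rough sketch of that field split (illustrative layout, not the gnumach real_descriptor):

#include <stdint.h>

struct demo_descriptor
{
  uint16_t limit_low;	/* limit bits 0..15 */
  uint16_t base_low;	/* base bits 0..15 */
  uint8_t  base_med;	/* base bits 16..23 */
  uint8_t  access;
  uint8_t  granularity;	/* limit bits 16..19 plus flags */
  uint8_t  base_high;	/* base bits 24..31 */
};

static inline void
demo_fill (struct demo_descriptor *d, uint64_t base, uint64_t limit,
	   uint8_t access, uint8_t sizebits)
{
  d->limit_low   = limit & 0xffff;
  d->base_low    = base & 0xffff;
  d->base_med    = (base >> 16) & 0xff;
  d->access      = access;
  d->granularity = ((limit >> 16) & 0x0f) | (sizebits & 0xf0);
  d->base_high   = (base >> 24) & 0xff;	/* base bits 32..63 simply do not fit */
}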
---
 i386/i386/seg.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/i386/i386/seg.h b/i386/i386/seg.h
index 71c05e49..673d1d9f 100644
--- a/i386/i386/seg.h
+++ b/i386/i386/seg.h
@@ -32,6 +32,7 @@
 #define_I386_SEG_H_
 
 #include 
+#include 
 
 /*
  * i386 segmentation.
@@ -181,7 +182,7 @@ static inline void lldt(unsigned short ldt_selector)
 
 /* Fill a segment descriptor.  */
 static inline void
-fill_descriptor(struct real_descriptor *_desc, unsigned base, unsigned limit,
+fill_descriptor(struct real_descriptor *_desc, vm_offset_t base, vm_offset_t 
limit,
unsigned char access, unsigned char sizebits)
 {
/* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't 
simpler actually */
-- 
2.30.2




[PATCH 1/3] pmap: dynamically allocate the whole user page tree map

2023-05-21 Thread Luca Dariz
* i386/intel/pmap.c: switch to dynamic allocation of all the page tree
  map levels for the user-space address range, using a separate kmem
  cache for each level. This allows to extend the usable memory space
  on x86_64 to use more than one L3 page for user space. The kernel
  address map is left untouched for now as it needs a different
  initialization.
* i386/intel/pmap.h: remove hardcoded user pages and add macro to
  reconstruct the page-to-virtual mapping
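
For orientation, a sketch of the 4-level x86_64 page-table indexing this generalizes (illustrative macros assuming the usual 9-bits-per-level split; the real definitions such as lin2l4num() live in the gnumach i386/intel headers):

#include <stdint.h>

/* Each level translates 9 bits of the 48-bit linear address:
   L1 (page table) bits 12..20, L2 (page directory) bits 21..29,
   L3 (PDPT) bits 30..38, L4 (PML4) bits 39..47 -- which is why one kmem
   cache per level is enough to build the whole user-space tree on demand.  */
#define DEMO_PT_SHIFT(level)	(12 + 9 * ((level) - 1))
#define DEMO_PT_INDEX(va, level) \
  (((uint64_t) (va) >> DEMO_PT_SHIFT (level)) & 0x1ff)

/* DEMO_PT_INDEX (addr, 4) plays the role lin2l4num () plays in the patch.  */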
---
 i386/intel/pmap.c | 544 ++
 i386/intel/pmap.h |  21 +-
 2 files changed, 277 insertions(+), 288 deletions(-)

diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index e867ed59..3a30271e 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -398,6 +398,7 @@ struct pmap kernel_pmap_store;
 pmap_t kernel_pmap;
 
 struct kmem_cache pmap_cache;  /* cache of pmap structures */
+struct kmem_cache pt_cache;/* cache of page tables */
 struct kmem_cache pd_cache;/* cache of page directories */
 #if PAE
 struct kmem_cache pdpt_cache;  /* cache of page directory pointer tables */
@@ -429,6 +430,14 @@ pt_entry_t *kernel_page_dir;
  */
 static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS * NCPUS];
 
+#ifdef __x86_64__
+static inline pt_entry_t *
+pmap_l4base(const pmap_t pmap, vm_offset_t lin_addr)
+{
+	return &pmap->l4base[lin2l4num(lin_addr)];
+}
+#endif
+
 #ifdef PAE
 static inline pt_entry_t *
 pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
@@ -443,7 +452,7 @@ pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
 #else /* __x86_64__ */
pdp_table = pmap->pdpbase;
 #endif /* __x86_64__ */
-   return pdp_table;
+	return &pdp_table[lin2pdpnum(lin_addr)];
 }
 #endif
 
@@ -456,7 +465,9 @@ pmap_pde(const pmap_t pmap, vm_offset_t addr)
 #if PAE
pt_entry_t *pdp_table;
pdp_table = pmap_ptp(pmap, addr);
-   pt_entry_t pde = pdp_table[lin2pdpnum(addr)];
+if (pdp_table == 0)
+   return(PT_ENTRY_NULL);
+   pt_entry_t pde = *pdp_table;
if ((pde & INTEL_PTE_VALID) == 0)
return PT_ENTRY_NULL;
page_dir = (pt_entry_t *) ptetokv(pde);
@@ -1092,15 +1103,18 @@ void pmap_init(void)
 */
s = (vm_size_t) sizeof(struct pmap);
 	kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
-	kmem_cache_init(&pd_cache, "pd",
+	kmem_cache_init(&pt_cache, "pmap_L1",
+	INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+	KMEM_CACHE_PHYSMEM);
+	kmem_cache_init(&pd_cache, "pmap_L2",
 	INTEL_PGBYTES, INTEL_PGBYTES, NULL,
 	KMEM_CACHE_PHYSMEM);
 #if PAE
-	kmem_cache_init(&pdpt_cache, "pdpt",
+	kmem_cache_init(&pdpt_cache, "pmap_L3",
 	INTEL_PGBYTES, INTEL_PGBYTES, NULL,
 	KMEM_CACHE_PHYSMEM);
 #ifdef __x86_64__
-	kmem_cache_init(&l4_cache, "L4",
+	kmem_cache_init(&l4_cache, "pmap_L4",
INTEL_PGBYTES, INTEL_PGBYTES, NULL,
KMEM_CACHE_PHYSMEM);
 #endif /* __x86_64__ */
@@ -1244,6 +1258,11 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
vm_object_lock(pmap_object);
m = vm_page_lookup(pmap_object, pa);
vm_page_lock_queues();
+#ifdef MACH_PV_PAGETABLES
+if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
+panic("couldn't unpin page %llx(%lx)\n", pa, (vm_offset_t) 
kv_to_ma(pa));
+pmap_set_page_readwrite((void*) phystokv(pa));
+#endif /* MACH_PV_PAGETABLES */
vm_page_free(m);
inuse_ptepages_count--;
vm_page_unlock_queues();
@@ -1265,7 +1284,7 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
 pmap_t pmap_create(vm_size_t size)
 {
 #ifdef __x86_64__
-   // needs to be reworked if we want to dynamically allocate PDPs
+   // needs to be reworked if we want to dynamically allocate PDPs for 
kernel
const int PDPNUM = PDPNUM_KERNEL;
 #endif
pt_entry_t  *page_dir[PDPNUM];
@@ -1360,30 +1379,6 @@ pmap_t pmap_create(vm_size_t size)
memset(p->l4base, 0, INTEL_PGBYTES);
 	WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
  pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | 
INTEL_PTE_VALID | INTEL_PTE_WRITE);
-#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS)
-   // kernel vm and user vm are not in the same l4 entry, so add the user 
one
-// TODO alloc only PDPTE for the user range VM_MIN_USER_ADDRESS, 
VM_MAX_USER_ADDRESS
-   // and keep the same for kernel range, in l4 table we have different 
entries
-	pt_entry_t *pdp_user = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
-   if (pdp_user == NULL) {
-   panic("pmap create");
-   }
-memset(pdp_user, 0, INTEL_PGBYTES);
-	WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_USER_ADDRESS)],
- pa_to_pte(kvtophys((vm_offset_t) pdp_user)) | INTEL_PTE_VALID 
| INTEL_PTE_WRITE | INTEL_PTE_USER);
-#endif /* 

[PATCH 2/3] x86_64: push user's VM_MAX_ADDRESS

2023-05-21 Thread Luca Dariz
* i386/include/mach/i386/vm_param.h: check for both KERNEL and USER32
  to differentiate between user/kernel on x86_64, and push the upper
  limit of user address space to 128 TB.
---
 i386/include/mach/i386/vm_param.h | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/i386/include/mach/i386/vm_param.h 
b/i386/include/mach/i386/vm_param.h
index f09049a5..d1a6c656 100644
--- a/i386/include/mach/i386/vm_param.h
+++ b/i386/include/mach/i386/vm_param.h
@@ -65,18 +65,26 @@
~(I386_PGBYTES-1))
 #define i386_trunc_page(x) (((unsigned long)(x)) & ~(I386_PGBYTES-1))
 
-/* User address spaces are 3GB each,
-   starting at virtual and linear address 0.
+/* User address spaces are 3GB each on a 32-bit kernel, starting at
+   virtual and linear address 0.
+   On a 64-bit kernel we split the address space in half, with the
+   lower 128TB for the user address space and the upper 128TB for the
+   kernel address space.
 
-   VM_MAX_ADDRESS can be reduced to leave more space for the kernel, but must
-   not be increased to more than 3GB as glibc and hurd servers would not cope
-   with that.
+   On a 32-bit kernel VM_MAX_ADDRESS can be reduced to leave more
+   space for the kernel, but must not be increased to more than 3GB as
+   glibc and hurd servers would not cope with that.
*/
 #define VM_MIN_ADDRESS (0ULL)
+
 #ifdef __x86_64__
-#define VM_MAX_ADDRESS (0xc0000000ULL)
-#else
+#if defined(KERNEL) && defined(USER32)
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#else /* defined(KERNEL) && defined(USER32) */
+#define VM_MAX_ADDRESS (0x7fffffffffffULL)
+#endif /* defined(KERNEL) && defined(USER32) */
+#else /* __x86_64__ */
 #define VM_MAX_ADDRESS (0xc0000000UL)
-#endif
+#endif /* __x86_64__ */
 
 #endif /* _MACH_I386_VM_PARAM_H_ */
-- 
2.30.2