[PATCH v6 01/14] x86/boot: Place kernel_info at a fixed offset

2023-05-04 Thread Ross Philipson
From: Arvind Sankar 

There are use cases for storing the offset of a symbol in kernel_info.
For example, the trenchboot series [0] needs to store the offset of the
Measured Launch Environment header in kernel_info.

Since the following commit (note: commit ID from tip/master)

commit 527afc212231 ("x86/boot: Check that there are no run-time relocations")

run-time relocations are not allowed in the compressed kernel, so simply
using the symbol in kernel_info, as

.long   symbol

will cause a linker error because this is not position-independent.

With kernel_info being a separate object file and in a different section
from startup_32, there is no way to calculate the offset of a symbol
from the start of the image in a position-independent way.

To enable such use cases, put kernel_info into its own section which is
placed at a predetermined offset (KERNEL_INFO_OFFSET) via the linker
script. This will allow calculating the symbol offset in a
position-independent way, by adding the offset from the start of
kernel_info to KERNEL_INFO_OFFSET.

Ensure that kernel_info is aligned, and use the SYM_DATA.* macros
instead of bare labels. This stores the size of the kernel_info
structure in the ELF symbol table.

Signed-off-by: Arvind Sankar 
Cc: Ross Philipson 
Signed-off-by: Ross Philipson 
---
 arch/x86/boot/compressed/kernel_info.S | 19 +++
 arch/x86/boot/compressed/kernel_info.h | 12 
 arch/x86/boot/compressed/vmlinux.lds.S |  6 ++
 3 files changed, 33 insertions(+), 4 deletions(-)
 create mode 100644 arch/x86/boot/compressed/kernel_info.h

diff --git a/arch/x86/boot/compressed/kernel_info.S b/arch/x86/boot/compressed/kernel_info.S
index f818ee8..c18f071 100644
--- a/arch/x86/boot/compressed/kernel_info.S
+++ b/arch/x86/boot/compressed/kernel_info.S
@@ -1,12 +1,23 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
+#include <linux/linkage.h>
 #include <asm/bootparam.h>
+#include "kernel_info.h"
 
-   .section ".rodata.kernel_info", "a"
+/*
+ * If a field needs to hold the offset of a symbol from the start
+ * of the image, use the macro below, eg
+ * .long   rva(symbol)
+ * This will avoid creating run-time relocations, which are not
+ * allowed in the compressed kernel.
+ */
+
+#define rva(X) (((X) - kernel_info) + KERNEL_INFO_OFFSET)
 
-   .global kernel_info
+   .section ".rodata.kernel_info", "a"
 
-kernel_info:
+   .balign 16
+SYM_DATA_START(kernel_info)
/* Header, Linux top (structure). */
.ascii  "LToP"
/* Size. */
@@ -19,4 +30,4 @@ kernel_info:
 
 kernel_info_var_len_data:
/* Empty for time being... */
-kernel_info_end:
+SYM_DATA_END_LABEL(kernel_info, SYM_L_LOCAL, kernel_info_end)
diff --git a/arch/x86/boot/compressed/kernel_info.h b/arch/x86/boot/compressed/kernel_info.h
new file mode 100644
index 000..c127f84
--- /dev/null
+++ b/arch/x86/boot/compressed/kernel_info.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BOOT_COMPRESSED_KERNEL_INFO_H
+#define BOOT_COMPRESSED_KERNEL_INFO_H
+
+#ifdef CONFIG_X86_64
+#define KERNEL_INFO_OFFSET 0x500
+#else /* 32-bit */
+#define KERNEL_INFO_OFFSET 0x100
+#endif
+
+#endif /* BOOT_COMPRESSED_KERNEL_INFO_H */
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index b22f34b..265c88f 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -7,6 +7,7 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
 
 #include 
 #include 
+#include "kernel_info.h"
 
 #ifdef CONFIG_X86_64
 OUTPUT_ARCH(i386:x86-64)
@@ -27,6 +28,11 @@ SECTIONS
HEAD_TEXT
_ehead = . ;
}
+   .rodata.kernel_info KERNEL_INFO_OFFSET : {
+   *(.rodata.kernel_info)
+   }
+   ASSERT(ABSOLUTE(kernel_info) == KERNEL_INFO_OFFSET, "kernel_info at bad address!")
+
.rodata..compressed : {
*(.rodata..compressed)
}
-- 
1.8.3.1




[PATCH v6 03/14] x86: Secure Launch Kconfig

2023-05-04 Thread Ross Philipson
Initial bits to bring in Secure Launch functionality. Add Kconfig
options for compiling in/out the Secure Launch code.

Signed-off-by: Ross Philipson 
---
 arch/x86/Kconfig | 12 
 1 file changed, 12 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 53bab12..85ecf3f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2041,6 +2041,18 @@ config EFI_RUNTIME_MAP
 
  See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
 
+config SECURE_LAUNCH
+   bool "Secure Launch support"
+   default n
+   depends on X86_64 && X86_X2APIC
+   help
+  The Secure Launch feature allows a kernel to be loaded
+  directly through an Intel TXT measured launch. Intel TXT
+  establishes a Dynamic Root of Trust for Measurement (DRTM)
+  where the CPU measures the kernel image. This feature then
+  continues the measurement chain over kernel configuration
+  information and init images.
+
 source "kernel/Kconfig.hz"
 
 config KEXEC
-- 
1.8.3.1




[PATCH v6 05/14] x86: Secure Launch main header file

2023-05-04 Thread Ross Philipson
Introduce the main Secure Launch header file used in the early SL stub
and the early setup code.
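
As a quick orientation: nearly every consumer later in this series tests the
state flags defined here in pairs. A sketch of that idiom, assuming kernel
types and the slaunch_get_flags() accessor introduced in patch 08:

    /* True only when a measured launch is active and the arch was TXT
     * (the same test appears in patches 09 and 10 of this series). */
    static bool slaunch_txt_active(void)
    {
            u32 flags = slaunch_get_flags();

            return (flags & (SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT)) ==
                   (SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT);
    }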

Signed-off-by: Ross Philipson 
---
 include/linux/slaunch.h | 513 
 1 file changed, 513 insertions(+)
 create mode 100644 include/linux/slaunch.h

diff --git a/include/linux/slaunch.h b/include/linux/slaunch.h
new file mode 100644
index 000..a1c3172
--- /dev/null
+++ b/include/linux/slaunch.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Main Secure Launch header file.
+ *
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ */
+
+#ifndef _LINUX_SLAUNCH_H
+#define _LINUX_SLAUNCH_H
+
+/*
+ * Secure Launch Defined State Flags
+ */
+#define SL_FLAG_ACTIVE 0x0001
+#define SL_FLAG_ARCH_SKINIT    0x0002
+#define SL_FLAG_ARCH_TXT   0x0004
+
+/*
+ * Secure Launch CPU Type
+ */
+#define SL_CPU_AMD 1
+#define SL_CPU_INTEL   2
+
+#if IS_ENABLED(CONFIG_SECURE_LAUNCH)
+
+#define __SL32_CS  0x0008
+#define __SL32_DS  0x0010
+
+/*
+ * Intel Safer Mode Extensions (SMX)
+ *
+ * Intel SMX provides a programming interface to establish a Measured Launched
+ * Environment (MLE). The measurement and protection mechanisms are supported
+ * by the capabilities of an Intel Trusted Execution Technology (TXT) platform.
+ * SMX is the processor's programming interface in an Intel TXT platform.
+ *
+ * See Intel SDM Volume 2 - 6.1 "Safer Mode Extensions Reference"
+ */
+
+/*
+ * SMX GETSEC Leaf Functions
+ */
+#define SMX_X86_GETSEC_SEXIT   5
+#define SMX_X86_GETSEC_SMCTRL  7
+#define SMX_X86_GETSEC_WAKEUP  8
+
+/*
+ * Intel Trusted Execution Technology MMIO Registers Banks
+ */
+#define TXT_PUB_CONFIG_REGS_BASE   0xfed30000
+#define TXT_PRIV_CONFIG_REGS_BASE  0xfed20000
+#define TXT_NR_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \
+ TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT)
+
+/*
+ * Intel Trusted Execution Technology (TXT) Registers
+ */
+#define TXT_CR_STS 0x0000
+#define TXT_CR_ESTS    0x0008
+#define TXT_CR_ERRORCODE   0x0030
+#define TXT_CR_CMD_RESET   0x0038
+#define TXT_CR_CMD_CLOSE_PRIVATE   0x0048
+#define TXT_CR_DIDVID  0x0110
+#define TXT_CR_VER_EMIF    0x0200
+#define TXT_CR_CMD_UNLOCK_MEM_CONFIG   0x0218
+#define TXT_CR_SINIT_BASE  0x0270
+#define TXT_CR_SINIT_SIZE  0x0278
+#define TXT_CR_MLE_JOIN    0x0290
+#define TXT_CR_HEAP_BASE   0x0300
+#define TXT_CR_HEAP_SIZE   0x0308
+#define TXT_CR_SCRATCHPAD  0x0378
+#define TXT_CR_CMD_OPEN_LOCALITY1  0x0380
+#define TXT_CR_CMD_CLOSE_LOCALITY1 0x0388
+#define TXT_CR_CMD_OPEN_LOCALITY2  0x0390
+#define TXT_CR_CMD_CLOSE_LOCALITY2 0x0398
+#define TXT_CR_CMD_SECRETS 0x08e0
+#define TXT_CR_CMD_NO_SECRETS  0x08e8
+#define TXT_CR_E2STS   0x08f0
+
+/* TXT default register value */
+#define TXT_REGVALUE_ONE   0x1ULL
+
+/* TXTCR_STS status bits */
+#define TXT_SENTER_DONE_STS    (1<<0)
+#define TXT_SEXIT_DONE_STS (1<<1)
+
+/*
+ * SINIT/MLE Capabilities Field Bit Definitions
+ */
+#define TXT_SINIT_MLE_CAP_WAKE_GETSEC  0
+#define TXT_SINIT_MLE_CAP_WAKE_MONITOR 1
+
+/*
+ * OS/MLE Secure Launch Specific Definitions
+ */
+#define TXT_OS_MLE_STRUCT_VERSION  1
+#define TXT_OS_MLE_MAX_VARIABLE_MTRRS  32
+
+/*
+ * TXT Heap Table Enumeration
+ */
+#define TXT_BIOS_DATA_TABLE    1
+#define TXT_OS_MLE_DATA_TABLE  2
+#define TXT_OS_SINIT_DATA_TABLE3
+#define TXT_SINIT_MLE_DATA_TABLE   4
+#define TXT_SINIT_TABLE_MAX    TXT_SINIT_MLE_DATA_TABLE
+
+/*
+ * Secure Launch Defined Error Codes used in MLE-initiated TXT resets.
+ *
+ * TXT Specification
+ * Appendix I ACM Error Codes
+ */
+#define SL_ERROR_GENERIC   0xc0008001
+#define SL_ERROR_TPM_INIT  0xc0008002
+#define SL_ERROR_TPM_INVALID_LOG20 0xc0008003
+#define SL_ERROR_TPM_LOGGING_FAILED    0xc0008004
+#define SL_ERROR_REGION_STRADDLE_4GB   0xc0008005
+#define SL_ERROR_TPM_EXTEND    0xc0008006
+#define SL_ERROR_MTRR_INV_VCNT 0xc0008007
+#define SL_ERROR_MTRR_INV_DEF_TYPE 0xc0008008
+#define SL_ERROR_MTRR_INV_BASE 0xc0008009
+#define SL_ERROR_MTRR_INV_MASK 0xc000800a
+#define SL_ERROR_MSR_INV_MISC_EN   0xc000800b
+#define SL_ERROR_INV_AP_INTERRUPT  0xc000800c
+#define SL_ERROR_INTEGER_OVERFLOW  0xc000800d
+#define SL_ERROR_HEAP_WALK 0xc000800e
+#define SL_ERROR_HEAP_MAP  0xc000800f
+#define SL_ERROR_REGION_ABOVE_4GB  0xc0008010
+#define SL_ERROR_HEAP_INVALID_DMAR 0xc0008011
+#define SL_ERROR_HEAP_DMAR_SIZE    0xc0008012
+#define SL_ERROR_HEAP_DMAR_MAP 0xc0008013
+#define SL_ERROR_HI_PMR_BASE   0xc0008014
+#define SL_ERROR_HI_PMR_SIZE   0xc0008015
+#define SL

[PATCH v6 06/14] x86: Add early SHA support for Secure Launch early measurements

2023-05-04 Thread Ross Philipson
From: "Daniel P. Smith" 

The SHA algorithms are necessary to measure configuration information into
the TPM as early as possible before using the values. This implementation
uses the established approach of #including the SHA libraries directly in
the code since the compressed kernel is not uncompressed at this point.

The SHA code here has its origins in the code from the main kernel:

commit c4d5b9ffa31f ("crypto: sha1 - implement base layer for SHA-1")

That code could not be pulled directly into the setup portion of the
compressed kernel because of other dependencies it pulls in. The result
is a modified copy of that code that still leverages the core
SHA algorithms.
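
The wrappers follow the usual init/update/final pattern; measuring a buffer
with them looks like this (a sketch using only functions from this patch,
with a data/len buffer assumed and error handling omitted):

    u8 digest[SHA1_DIGEST_SIZE];
    struct sha1_state sctx;

    early_sha1_init(&sctx);
    early_sha1_update(&sctx, data, len);  /* may be called repeatedly */
    early_sha1_final(&sctx, digest);      /* also wipes the hash state */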

Signed-off-by: Daniel P. Smith 
Signed-off-by: Ross Philipson 
---
 arch/x86/boot/compressed/Makefile   |  2 +
 arch/x86/boot/compressed/early_sha1.c   | 97 +
 arch/x86/boot/compressed/early_sha1.h   | 17 ++
 arch/x86/boot/compressed/early_sha256.c |  7 +++
 lib/crypto/sha1.c   |  4 ++
 lib/crypto/sha256.c |  8 +++
 6 files changed, 135 insertions(+)
 create mode 100644 arch/x86/boot/compressed/early_sha1.c
 create mode 100644 arch/x86/boot/compressed/early_sha1.h
 create mode 100644 arch/x86/boot/compressed/early_sha256.c

diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6b6cfe6..1d327d4 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -112,6 +112,8 @@ vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
 vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
+vmlinux-objs-$(CONFIG_SECURE_LAUNCH) += $(obj)/early_sha1.o $(obj)/early_sha256.o
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,ld)
 
diff --git a/arch/x86/boot/compressed/early_sha1.c b/arch/x86/boot/compressed/early_sha1.c
new file mode 100644
index 000..524ec23
--- /dev/null
+++ b/arch/x86/boot/compressed/early_sha1.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Apertus Solutions, LLC.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "early_sha1.h"
+
+#define SHA1_DISABLE_EXPORT
+#include "../../../../lib/crypto/sha1.c"
+
+/* The SHA1 implementation in lib/sha1.c was written to get the workspace
+ * buffer as a parameter. This wrapper function provides a container
+ * around a temporary workspace that is cleared after the transform completes.
+ */
+static void __sha_transform(u32 *digest, const char *data)
+{
+   u32 ws[SHA1_WORKSPACE_WORDS];
+
+   sha1_transform(digest, data, ws);
+
+   memzero_explicit(ws, sizeof(ws));
+}
+
+void early_sha1_init(struct sha1_state *sctx)
+{
+   sha1_init(sctx->state);
+   sctx->count = 0;
+}
+
+void early_sha1_update(struct sha1_state *sctx,
+  const u8 *data,
+  unsigned int len)
+{
+   unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+   sctx->count += len;
+
+   if (likely((partial + len) >= SHA1_BLOCK_SIZE)) {
+   int blocks;
+
+   if (partial) {
+   int p = SHA1_BLOCK_SIZE - partial;
+
+   memcpy(sctx->buffer + partial, data, p);
+   data += p;
+   len -= p;
+
+   __sha_transform(sctx->state, sctx->buffer);
+   }
+
+   blocks = len / SHA1_BLOCK_SIZE;
+   len %= SHA1_BLOCK_SIZE;
+
+   if (blocks) {
+   while (blocks--) {
+   __sha_transform(sctx->state, data);
+   data += SHA1_BLOCK_SIZE;
+   }
+   }
+   partial = 0;
+   }
+
+   if (len)
+   memcpy(sctx->buffer + partial, data, len);
+}
+
+void early_sha1_final(struct sha1_state *sctx, u8 *out)
+{
+   const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+   unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+   __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+   __be32 *digest = (__be32 *)out;
+   int i;
+
+   sctx->buffer[partial++] = 0x80;
+   if (partial > bit_offset) {
+   memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+   partial = 0;
+
+   __sha_transform(sctx->state, sctx->buffer);
+   }
+
+   memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+   *bits = cpu_to_be64(sctx->count << 3);
+   __sha_transform(sctx->state, sctx->buffer);
+
+   for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+   put_unaligned_be32(sctx->state[i], digest++);
+
+   *sctx = (struct sha1_state){};
+}
diff --git a/arch/x86/boot/compressed/early_sha1.h b/arch/x86/boot/compressed/early_sha1.h
new file mode 100644
index 000..adcc4a9
--- /dev/null
+++ b/

[PATCH v6 04/14] x86: Secure Launch Resource Table header file

2023-05-04 Thread Ross Philipson
Introduce the Secure Launch Resource Table which forms the formal
interface between the pre and post launch code.
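
The table is a packed header followed by variable-size entries terminated by
an SLR_ENTRY_END tag. A hedged sketch of a walk (the header's own helpers,
e.g. slr_next_entry_by_tag() used in patch 14, do this with proper bounds
checks against the table size; handle_dl_info() is hypothetical):

    struct slr_entry_hdr *e =
            (struct slr_entry_hdr *)((u8 *)slrt + sizeof(*slrt));

    while (e->tag != SLR_ENTRY_END) {
            if (e->tag == SLR_ENTRY_DL_INFO)
                    handle_dl_info((struct slr_entry_dl_info *)e);
            /* each entry records its own size, so hop to the next one */
            e = (struct slr_entry_hdr *)((u8 *)e + e->size);
    }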

Signed-off-by: Ross Philipson 
---
 include/linux/slr_table.h | 270 ++
 1 file changed, 270 insertions(+)
 create mode 100644 include/linux/slr_table.h

diff --git a/include/linux/slr_table.h b/include/linux/slr_table.h
new file mode 100644
index 000..d4b76e5
--- /dev/null
+++ b/include/linux/slr_table.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secure Launch Resource Table
+ *
+ * Copyright (c) 2023, Oracle and/or its affiliates.
+ */
+
+#ifndef _LINUX_SLR_TABLE_H
+#define _LINUX_SLR_TABLE_H
+
+/* Put this in efi.h if it becomes a standard */
+#define SLR_TABLE_GUID EFI_GUID(0x877a9b2a, 0x0385, 0x45d1, 0xa0, 0x34, 0x9d, 0xac, 0x9c, 0x9e, 0x56, 0x5f)
+
+/* SLR table header values */
+#define SLR_TABLE_MAGIC    0x4452544d
+#define SLR_TABLE_REVISION 1
+
+/* Current revisions for the policy and UEFI config */
+#define SLR_POLICY_REVISION    1
+#define SLR_UEFI_CONFIG_REVISION   1
+
+/* SLR defined architectures */
+#define SLR_INTEL_TXT  1
+#define SLR_AMD_SKINIT 2
+
+/* SLR defined bootloaders */
+#define SLR_BOOTLOADER_INVALID 0
+#define SLR_BOOTLOADER_GRUB    1
+
+/* Log formats */
+#define SLR_DRTM_TPM12_LOG 1
+#define SLR_DRTM_TPM20_LOG 2
+
+/* DRTM Policy Entry Flags */
+#define SLR_POLICY_FLAG_MEASURED   0x1
+#define SLR_POLICY_IMPLICIT_SIZE   0x2
+
+/* Array Lengths */
+#define TPM_EVENT_INFO_LENGTH  32
+#define TXT_VARIABLE_MTRRS_LENGTH  32
+
+/* Tags */
+#define SLR_ENTRY_INVALID  0x0000
+#define SLR_ENTRY_DL_INFO  0x0001
+#define SLR_ENTRY_LOG_INFO 0x0002
+#define SLR_ENTRY_ENTRY_POLICY 0x0003
+#define SLR_ENTRY_INTEL_INFO   0x0004
+#define SLR_ENTRY_AMD_INFO 0x0005
+#define SLR_ENTRY_ARM_INFO 0x0006
+#define SLR_ENTRY_UEFI_INFO    0x0007
+#define SLR_ENTRY_UEFI_CONFIG  0x0008
+#define SLR_ENTRY_END  0xffff
+
+/* Entity Types */
+#define SLR_ET_UNSPECIFIED 0x0000
+#define SLR_ET_SLRT    0x0001
+#define SLR_ET_BOOT_PARAMS 0x0002
+#define SLR_ET_SETUP_DATA  0x0003
+#define SLR_ET_CMDLINE 0x0004
+#define SLR_ET_UEFI_MEMMAP 0x0005
+#define SLR_ET_RAMDISK 0x0006
+#define SLR_ET_TXT_OS2MLE  0x0010
+#define SLR_ET_UNUSED  0xffff
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Primary SLR Table Header
+ */
+struct slr_table {
+   u32 magic;
+   u16 revision;
+   u16 architecture;
+   u32 size;
+   u32 max_size;
+   /* entries[] */
+} __packed;
+
+/*
+ * Common SLRT Table Header
+ */
+struct slr_entry_hdr {
+   u16 tag;
+   u16 size;
+} __packed;
+
+/*
+ * Boot loader context
+ */
+struct slr_bl_context {
+   u16 bootloader;
+   u16 reserved;
+   u64 context;
+} __packed;
+
+/*
+ * DRTM Dynamic Launch Configuration
+ */
+struct slr_entry_dl_info {
+   struct slr_entry_hdr hdr;
+   struct slr_bl_context bl_context;
+   u64 dl_handler;
+   u64 dce_base;
+   u32 dce_size;
+   u64 dlme_entry;
+} __packed;
+
+/*
+ * TPM Log Information
+ */
+struct slr_entry_log_info {
+   struct slr_entry_hdr hdr;
+   u16 format;
+   u16 reserved;
+   u64 addr;
+   u32 size;
+} __packed;
+
+/*
+ * DRTM Measurement Policy
+ */
+struct slr_entry_policy {
+   struct slr_entry_hdr hdr;
+   u16 revision;
+   u16 nr_entries;
+   /* policy_entries[] */
+} __packed;
+
+/*
+ * DRTM Measurement Entry
+ */
+struct slr_policy_entry {
+   u16 pcr;
+   u16 entity_type;
+   u16 flags;
+   u16 reserved;
+   u64 entity;
+   u64 size;
+   char evt_info[TPM_EVENT_INFO_LENGTH];
+} __packed;
+
+/*
+ * Secure Launch defined MTRR saving structures
+ */
+struct slr_txt_mtrr_pair {
+   u64 mtrr_physbase;
+   u64 mtrr_physmask;
+} __packed;
+
+struct slr_txt_mtrr_state {
+   u64 default_mem_type;
+   u64 mtrr_vcnt;
+   struct slr_txt_mtrr_pair mtrr_pair[TXT_VARIABLE_MTRRS_LENGTH];
+} __packed;
+
+/*
+ * Intel TXT Info table
+ */
+struct slr_entry_intel_info {
+   struct slr_entry_hdr hdr;
+   u64 saved_misc_enable_msr;
+   struct slr_txt_mtrr_state saved_bsp_mtrrs;
+} __packed;
+
+/*
+ * AMD SKINIT Info table
+ */
+struct slr_entry_amd_info {
+   struct slr_entry_hdr hdr;
+} __packed;
+
+/*
+ * ARM DRTM Info table
+ */
+struct slr_entry_arm_info {
+   struct slr_entry_hdr hdr;
+} __packed;
+
+struct slr_entry_uefi_config {
+   struct slr_entry_hdr hdr;
+   u16 revision;
+   u16 nr_entries;
+   /* uefi_cfg_entries[] */
+} __packed;
+
+struct slr_uefi_cfg_entry {
+   u16 pcr;
+   u16 reserved;
+   u64 cfg; /* address or value */
+   u32 size;
+   char evt_info[TPM_EVENT_INFO_LENGTH];
+} __packed;
+
+static inline void *slr_end_of_entrys(struct slr_table *table)
+{
+   return (((void *)table) + table->si

[PATCH v6 09/14] x86: Secure Launch SMP bringup support

2023-05-04 Thread Ross Philipson
On Intel, the APs are left in a well documented state after TXT performs
the late launch. Specifically, they cannot have #INIT asserted on them, so
a standard startup via INIT/SIPI/SIPI cannot be performed. Instead, the
early SL stub code parks the APs in a pause/jmp loop waiting for an NMI.
The modified SMP boot code is called for the Secure Launch case. The
jump address for the RM piggy entry point is fixed up in the jump where
the APs are waiting, and an NMI IPI is sent to each AP. The AP vectors to
the Secure Launch entry point in the RM piggy, which mimics what the real
mode code would do, then jumps to the standard RM piggy protected mode
entry point.
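
Condensed, the wake path implemented in the diff below is (names taken from
the diff, error handling and delays omitted):

    /* Patch the parked APs' long-jump target once, then kick this AP;
     * the NMI frees it from its pause/jmp park loop. */
    if (slaunch_fixup_jump_vector())
            return -1;
    apic_icr_write(APIC_DM_NMI, apicid);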

Signed-off-by: Ross Philipson 
---
 arch/x86/include/asm/realmode.h  |  3 ++
 arch/x86/kernel/smpboot.c| 86 
 arch/x86/realmode/rm/header.S|  3 ++
 arch/x86/realmode/rm/trampoline_64.S | 37 
 4 files changed, 129 insertions(+)

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index f6a1737..576fe62 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -38,6 +38,9 @@ struct real_mode_header {
 #ifdef CONFIG_X86_64
u32 machine_real_restart_seg;
 #endif
+#ifdef CONFIG_SECURE_LAUNCH
+   u32 sl_trampoline_start32;
+#endif
 };
 
 /* This must match data at realmode/rm/trampoline_{32,64}.S */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 352f0ce..07d740be 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,6 +57,7 @@
 #include 
 #include 
 #include 
+#include <linux/slaunch.h>
 
 #include 
 #include 
@@ -1068,6 +1069,83 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
return 0;
 }
 
+#ifdef CONFIG_SECURE_LAUNCH
+
+static atomic_t first_ap_only = {1};
+
+/*
+ * Called to fix the long jump address for the waiting APs to vector to
+ * the correct startup location in the Secure Launch stub in the rmpiggy.
+ */
+static int
+slaunch_fixup_jump_vector(void)
+{
+   struct sl_ap_wake_info *ap_wake_info;
+   u32 *ap_jmp_ptr = NULL;
+
+   if (!atomic_dec_and_test(&first_ap_only))
+   return 0;
+
+   ap_wake_info = slaunch_get_ap_wake_info();
+
+   ap_jmp_ptr = (u32 *)__va(ap_wake_info->ap_wake_block +
+ap_wake_info->ap_jmp_offset);
+
+   *ap_jmp_ptr = real_mode_header->sl_trampoline_start32;
+
+   pr_debug("TXT AP long jump address updated\n");
+
+   return 0;
+}
+
+/*
+ * TXT AP startup is quite different than normal. The APs cannot have #INIT
+ * asserted on them or receive SIPIs. The early Secure Launch code has parked
+ * the APs in a pause loop waiting to receive an NMI. This will wake the APs
+ * and have them jump to the protected mode code in the rmpiggy where the rest
+ * of the SMP boot of the AP will proceed normally.
+ */
+static int
+slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
+{
+   unsigned long send_status = 0, accept_status = 0;
+
+   /* Only done once */
+   if (slaunch_fixup_jump_vector())
+   return -1;
+
+   /* Send NMI IPI to idling AP and wake it up */
+   apic_icr_write(APIC_DM_NMI, apicid);
+
+   if (init_udelay == 0)
+   udelay(10);
+   else
+   udelay(300);
+
+   send_status = safe_apic_wait_icr_idle();
+
+   if (init_udelay == 0)
+   udelay(10);
+   else
+   udelay(300);
+
+   accept_status = (apic_read(APIC_ESR) & 0xEF);
+
+   if (send_status)
+   pr_err("Secure Launch IPI never delivered???\n");
+   if (accept_status)
+   pr_err("Secure Launch IPI delivery error (%lx)\n",
+   accept_status);
+
+   return (send_status | accept_status);
+}
+
+#else
+
+#define slaunch_wakeup_cpu_from_txt(cpu, apicid)   0
+
+#endif  /* !CONFIG_SECURE_LAUNCH */
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -1132,6 +1210,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
cpumask_clear_cpu(cpu, cpu_initialized_mask);
smp_mb();
 
+   /* With Intel TXT, the AP startup is totally different */
+   if ((slaunch_get_flags() & (SL_FLAG_ACTIVE|SL_FLAG_ARCH_TXT)) ==
+  (SL_FLAG_ACTIVE|SL_FLAG_ARCH_TXT)) {
+   boot_error = slaunch_wakeup_cpu_from_txt(cpu, apicid);
+   goto txt_wake;
+   }
+
/*
 * Wake up a CPU in difference cases:
 * - Use a method from the APIC driver if one defined, with wakeup
@@ -1147,6 +1232,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
 cpu0_nmi_registered);
 
+txt_wake:
if (!boot_error) {
/*
 * Wai

[PATCH v6 10/14] kexec: Secure Launch kexec SEXIT support

2023-05-04 Thread Ross Philipson
Prior to running the next kernel via kexec, the Secure Launch code
closes down private SMX resources and does an SEXIT. This allows the
next kernel to start normally without any issues starting the APs etc.
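
As background for the diff below: GETSEC has no mnemonic in older assemblers,
so it is emitted as raw opcode bytes with the leaf function selected in EAX.
A generalized wrapper would look like this (a sketch; the patch's
smx_getsec_sexit() hardcodes the SMX_X86_GETSEC_SEXIT leaf from patch 05):

    /* GETSEC is encoded as 0x0f 0x37; the leaf number goes in EAX. */
    static inline void smx_getsec(u32 leaf)
    {
            asm volatile (".byte 0x0f,0x37\n" : : "a" (leaf));
    }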

Signed-off-by: Ross Philipson 
---
 arch/x86/kernel/slaunch.c | 69 +++
 kernel/kexec_core.c   |  4 +++
 2 files changed, 73 insertions(+)

diff --git a/arch/x86/kernel/slaunch.c b/arch/x86/kernel/slaunch.c
index 7dba088..176c029 100644
--- a/arch/x86/kernel/slaunch.c
+++ b/arch/x86/kernel/slaunch.c
@@ -495,3 +495,72 @@ void __init slaunch_setup_txt(void)
 
pr_info("Intel TXT setup complete\n");
 }
+
+static inline void smx_getsec_sexit(void)
+{
+   asm volatile (".byte 0x0f,0x37\n"
+ : : "a" (SMX_X86_GETSEC_SEXIT));
+}
+
+void slaunch_finalize(int do_sexit)
+{
+   u64 one = TXT_REGVALUE_ONE, val;
+   void __iomem *config;
+
+   if ((slaunch_get_flags() & (SL_FLAG_ACTIVE|SL_FLAG_ARCH_TXT)) !=
+   (SL_FLAG_ACTIVE | SL_FLAG_ARCH_TXT))
+   return;
+
+   config = ioremap(TXT_PRIV_CONFIG_REGS_BASE, TXT_NR_CONFIG_PAGES *
+PAGE_SIZE);
+   if (!config) {
+   pr_emerg("Error SEXIT failed to ioremap TXT private reqs\n");
+   return;
+   }
+
+   /* Clear secrets bit for SEXIT */
+   memcpy_toio(config + TXT_CR_CMD_NO_SECRETS, &one, sizeof(one));
+   memcpy_fromio(&val, config + TXT_CR_E2STS, sizeof(val));
+
+   /* Unlock memory configurations */
+   memcpy_toio(config + TXT_CR_CMD_UNLOCK_MEM_CONFIG, &one, sizeof(one));
+   memcpy_fromio(&val, config + TXT_CR_E2STS, sizeof(val));
+
+   /* Close the TXT private register space */
+   memcpy_toio(config + TXT_CR_CMD_CLOSE_PRIVATE, &one, sizeof(one));
+   memcpy_fromio(&val, config + TXT_CR_E2STS, sizeof(val));
+
+   /*
+* Calls to iounmap are not being done because of the state of the
+* system this late in the kexec process. Local IRQs are disabled and
+* iounmap causes a TLB flush which in turn causes a warning. Leaving
+* these mappings is not an issue since the next kernel is going to
+* completely re-setup memory management.
+*/
+
+   /* Map public registers and do a final read fence */
+   config = ioremap(TXT_PUB_CONFIG_REGS_BASE, TXT_NR_CONFIG_PAGES *
+PAGE_SIZE);
+   if (!config) {
+   pr_emerg("Error SEXIT failed to ioremap TXT public reqs\n");
+   return;
+   }
+
+   memcpy_fromio(&val, config + TXT_CR_E2STS, sizeof(val));
+
+   pr_emerg("TXT clear secrets bit and unlock memory complete.\n");
+
+   if (!do_sexit)
+   return;
+
+   if (smp_processor_id() != 0)
+   panic("Error TXT SEXIT must be called on CPU 0\n");
+
+   /* Enable SMX mode so the GETSEC[SEXIT] instruction can be used */
+   cr4_set_bits(X86_CR4_SMXE);
+
+   /* Do the SEXIT SMX operation */
+   smx_getsec_sexit();
+
+   pr_info("TXT SEXIT complete.\n");
+}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 3d578c6..5d66d68 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include <linux/slaunch.h>
 
 #include 
 #include 
@@ -1275,6 +1276,9 @@ int kernel_kexec(void)
cpu_hotplug_enable();
pr_notice("Starting new kernel\n");
machine_shutdown();
+
+   /* Finalize TXT registers and do SEXIT */
+   slaunch_finalize(1);
}
 
kmsg_dump(KMSG_DUMP_SHUTDOWN);
-- 
1.8.3.1




[PATCH v6 11/14] reboot: Secure Launch SEXIT support on reboot paths

2023-05-04 Thread Ross Philipson
If the MLE kernel is being powered off, rebooted or halted,
then SEXIT must be called. Note that the SEXIT GETSEC leaf
can only be called after a machine_shutdown() has been done on
these paths. The machine_shutdown() is not called on a few paths
like when poweroff action does not have a poweroff callback (into
ACPI code) or when an emergency reset is done. In these cases,
just the TXT registers are finalized but SEXIT is skipped.

Signed-off-by: Ross Philipson 
---
 arch/x86/kernel/reboot.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3adbe97..732c81b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include <linux/slaunch.h>
 #include 
 #include 
 #include 
@@ -720,6 +721,7 @@ static void native_machine_restart(char *__unused)
 
if (!reboot_force)
machine_shutdown();
+   slaunch_finalize(!reboot_force);
__machine_emergency_restart(0);
 }
 
@@ -730,6 +732,9 @@ static void native_machine_halt(void)
 
tboot_shutdown(TB_SHUTDOWN_HALT);
 
+   /* SEXIT done after machine_shutdown() to meet TXT requirements */
+   slaunch_finalize(1);
+
stop_this_cpu(NULL);
 }
 
@@ -738,8 +743,12 @@ static void native_machine_power_off(void)
if (kernel_can_power_off()) {
if (!reboot_force)
machine_shutdown();
+   slaunch_finalize(!reboot_force);
do_kernel_power_off();
+   } else {
+   slaunch_finalize(0);
}
+
/* A fallback in case there is no PM info available */
tboot_shutdown(TB_SHUTDOWN_HALT);
 }
@@ -767,6 +776,7 @@ void machine_shutdown(void)
 
 void machine_emergency_restart(void)
 {
+   slaunch_finalize(0);
__machine_emergency_restart(1);
 }
 
-- 
1.8.3.1




[PATCH v6 13/14] tpm: Allow locality 2 to be set when initializing the TPM for Secure Launch

2023-05-04 Thread Ross Philipson
The Secure Launch MLE environment uses PCRs that are only accessible from
the DRTM locality 2. By default the TPM drivers always initialize the
locality to 0. When a Secure Launch is in progress, initialize the
locality to 2.
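
The resulting choice is a two-way decision driven by the Secure Launch state
flag; standalone, it reads (mirroring the diff below):

    /* The DRTM PCRs the MLE uses accept extends only from locality 2. */
    int locality = (slaunch_get_flags() & SL_FLAG_ACTIVE) ? 2 : 0;

    rc = chip->ops->request_locality(chip, locality);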

Signed-off-by: Ross Philipson 
---
 drivers/char/tpm/tpm-chip.c | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 80aaa10..5dd2eed 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -23,6 +23,7 @@
 #include 
 #include 
 #include 
+#include <linux/slaunch.h>
 #include "tpm.h"
 
 DEFINE_IDR(dev_nums_idr);
@@ -34,12 +35,18 @@
 
 static int tpm_request_locality(struct tpm_chip *chip)
 {
+   int locality;
int rc;
 
if (!chip->ops->request_locality)
return 0;
 
-   rc = chip->ops->request_locality(chip, 0);
+   if (slaunch_get_flags() & SL_FLAG_ACTIVE)
+   locality = 2;
+   else
+   locality = 0;
+
+   rc = chip->ops->request_locality(chip, locality);
if (rc < 0)
return rc;
 
-- 
1.8.3.1




[PATCH v6 14/14] x86: EFI stub DRTM launch support for Secure Launch

2023-05-04 Thread Ross Philipson
This support allows the DRTM launch to be initiated after an EFI stub
launch of the Linux kernel is done. This is accomplished by providing
a handler to jump to when a Secure Launch is in progress.
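
The hand-off itself is a tail jump with the boot loader context passed in
RDI, the first System V argument register, so it is equivalent to the
following call that never returns (a sketch of the inline asm in the diff):

    /* The DL handler takes the boot loader context as its only argument. */
    void (*dl_handler)(struct slr_bl_context *ctx) =
            (void (*)(struct slr_bl_context *))dlinfo->dl_handler;

    dl_handler(&dlinfo->bl_context);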

Signed-off-by: Ross Philipson 
---
 drivers/firmware/efi/libstub/x86-stub.c | 55 +
 1 file changed, 55 insertions(+)

diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index a0bfd31..66ff922 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include <linux/slr_table.h>
 
 #include 
 #include 
@@ -760,6 +761,57 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
return EFI_SUCCESS;
 }
 
+static void efi_secure_launch(struct boot_params *boot_params)
+{
+   struct slr_entry_uefi_config *uefi_config;
+   struct slr_uefi_cfg_entry *uefi_entry;
+   struct slr_entry_dl_info *dlinfo;
+   efi_guid_t guid = SLR_TABLE_GUID;
+   struct slr_table *slrt;
+   u64 memmap_hi;
+   void *table;
+   u8 buf[64] = {0};
+
+   table = get_efi_config_table(guid);
+
+   /*
+* The presence of this table indicates a Secure Launch
+* is being requested.
+*/
+   if (!table)
+   return;
+
+   slrt = (struct slr_table *)table;
+
+   if (slrt->magic != SLR_TABLE_MAGIC)
+   return;
+
+   /* Add config information to measure the UEFI memory map */
+   uefi_config = (struct slr_entry_uefi_config *)buf;
+   uefi_config->hdr.tag = SLR_ENTRY_UEFI_CONFIG;
+   uefi_config->hdr.size = sizeof(*uefi_config) + sizeof(*uefi_entry);
+   uefi_config->revision = SLR_UEFI_CONFIG_REVISION;
+   uefi_config->nr_entries = 1;
+   uefi_entry = (struct slr_uefi_cfg_entry *)(buf + sizeof(*uefi_config));
+   uefi_entry->pcr = 18;
+   uefi_entry->cfg = boot_params->efi_info.efi_memmap;
+   memmap_hi = boot_params->efi_info.efi_memmap_hi;
+   uefi_entry->cfg |= memmap_hi << 32;
+   uefi_entry->size = boot_params->efi_info.efi_memmap_size;
+   memcpy(&uefi_entry->evt_info[0], "Measured UEFI memory map",
+   strlen("Measured UEFI memory map"));
+
+   if (slr_add_entry(slrt, (struct slr_entry_hdr *)uefi_config))
+   return;
+
+   /* Jump through DL stub to initiate Secure Launch */
+   dlinfo = (struct slr_entry_dl_info *)
+   slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_DL_INFO);
+
+   asm volatile ("jmp *%%rax"
+ : : "a" (dlinfo->dl_handler), "D" (&dlinfo->bl_context));
+}
+
 /*
  * On success, we return the address of startup_32, which has potentially been
  * relocated by efi_relocate_kernel.
@@ -905,6 +957,9 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
goto fail;
}
 
+   /* If a secure launch is in progress, this never returns */
+   efi_secure_launch(boot_params);
+
return bzimage_addr;
 fail:
efi_err("efi_main() failed!\n");
-- 
1.8.3.1




[PATCH v6 12/14] x86: Secure Launch late initcall platform module

2023-05-04 Thread Ross Philipson
From: "Daniel P. Smith" 

The Secure Launch platform module is a late init module. During the
init call, the TPM event log is read and measurements taken in the
early boot stub code are located. These measurements are extended
into the TPM PCRs using the mainline TPM kernel driver.

The platform module also registers the securityfs nodes to allow
access to TXT register fields on Intel along with the fetching of
and writing events to the late launch TPM log.
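
For context, extending one located measurement into a PCR through the
mainline driver looks roughly like this (a simplified sketch:
tpm_default_chip(), tpm_pcr_extend() and struct tpm_digest come from
include/linux/tpm.h, and real code supplies one digest per active PCR bank
rather than a single SHA-256 digest):

    static int extend_measurement(u32 pcr, const u8 *hash)
    {
            struct tpm_digest digest = { .alg_id = TPM_ALG_SHA256 };

            memcpy(digest.digest, hash, SHA256_DIGEST_SIZE);
            return tpm_pcr_extend(tpm_default_chip(), pcr, &digest);
    }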

Signed-off-by: Daniel P. Smith 
Signed-off-by: garnetgrimm 
Signed-off-by: Ross Philipson 
---
 arch/x86/kernel/Makefile   |   1 +
 arch/x86/kernel/slmodule.c | 520 +
 2 files changed, 521 insertions(+)
 create mode 100644 arch/x86/kernel/slmodule.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3d2a33e..ee3fe300 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_IA32_EMULATION)  += tls.o
 obj-y  += step.o
 obj-$(CONFIG_INTEL_TXT)+= tboot.o
 obj-$(CONFIG_SECURE_LAUNCH)+= slaunch.o
+obj-$(CONFIG_SECURE_LAUNCH)+= slmodule.o
 obj-$(CONFIG_ISA_DMA_API)  += i8237.o
 obj-y  += stacktrace.o
 obj-y  += cpu/
diff --git a/arch/x86/kernel/slmodule.c b/arch/x86/kernel/slmodule.c
new file mode 100644
index 000..70dcff5
--- /dev/null
+++ b/arch/x86/kernel/slmodule.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Secure Launch late validation/setup, securityfs exposure and
+ * finalization support.
+ *
+ * Copyright (c) 2022 Apertus Solutions, LLC
+ * Copyright (c) 2021 Assured Information Security, Inc.
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ *
+ * Author(s):
+ * Daniel P. Smith 
+ * Garnet T. Grimm 
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define DECLARE_TXT_PUB_READ_U(size, fmt, msg_size)\
+static ssize_t txt_pub_read_u##size(unsigned int offset,   \
+   loff_t *read_offset,\
+   size_t read_len,\
+   char __user *buf)   \
+{  \
+   void __iomem *txt;  \
+   char msg_buffer[msg_size];  \
+   u##size reg_value = 0;  \
+   txt = ioremap(TXT_PUB_CONFIG_REGS_BASE, \
+   TXT_NR_CONFIG_PAGES * PAGE_SIZE);   \
+   if (!txt)   \
+   return -EFAULT; \
+   memcpy_fromio(&reg_value, txt + offset, sizeof(u##size));   \
+   iounmap(txt);   \
+   snprintf(msg_buffer, msg_size, fmt, reg_value); \
+   return simple_read_from_buffer(buf, read_len, read_offset,  \
+   &msg_buffer, msg_size); \
+}
+
+DECLARE_TXT_PUB_READ_U(8, "%#04x\n", 6);
+DECLARE_TXT_PUB_READ_U(32, "%#010x\n", 12);
+DECLARE_TXT_PUB_READ_U(64, "%#018llx\n", 20);
+
+#define DECLARE_TXT_FOPS(reg_name, reg_offset, reg_size)   \
+static ssize_t txt_##reg_name##_read(struct file *flip,   \
+   char __user *buf, size_t read_len, loff_t *read_offset) \
+{  \
+   return txt_pub_read_u##reg_size(reg_offset, read_offset,\
+   read_len, buf); \
+}  \
+static const struct file_operations reg_name##_ops = { \
+   .read = txt_##reg_name##_read,  \
+}
+
+DECLARE_TXT_FOPS(sts, TXT_CR_STS, 64);
+DECLARE_TXT_FOPS(ests, TXT_CR_ESTS, 8);
+DECLARE_TXT_FOPS(errorcode, TXT_CR_ERRORCODE, 32);
+DECLARE_TXT_FOPS(didvid, TXT_CR_DIDVID, 64);
+DECLARE_TXT_FOPS(e2sts, TXT_CR_E2STS, 64);
+DECLARE_TXT_FOPS(ver_emif, TXT_CR_VER_EMIF, 32);
+DECLARE_TXT_FOPS(scratchpad, TXT_CR_SCRATCHPAD, 64);
+
+/*
+ * Securityfs exposure
+ */
+struct memfile {
+   char *name;
+   void *addr;
+   size_t size;
+};
+
+static struct memfile sl_evtlog = {"eventlog", 0, 0};
+static void *txt_heap;
+static struct txt_heap_event_log_pointer2_1_element __iomem *evtlog20;
+static DEFINE_MUTEX(sl_evt_log_mutex);
+
+static ssize_t sl_evtlog_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+   ssize_t size;
+
+   if (!sl_evtlog.addr)
+   

[PATCH v6 00/14] x86: Trenchboot secure dynamic launch Linux kernel support

2023-05-04 Thread Ross Philipson
The larger focus of the TrenchBoot project (https://github.com/TrenchBoot) is to
enhance the boot security and integrity in a unified manner. The first area of
focus has been on the Trusted Computing Group's Dynamic Launch for establishing
a hardware Root of Trust for Measurement, also known as DRTM (Dynamic Root of
Trust for Measurement). The project has been and continues to work on providing
a unified means to Dynamic Launch that is a cross-platform (Intel and AMD) and
cross-architecture (x86 and Arm), with our recent involvement in the upcoming
Arm DRTM specification. The order of introducing DRTM to the Linux kernel
follows the maturity of DRTM in the architectures. Intel's Trusted eXecution
Technology (TXT) is present today and only requires a preamble loader, e.g. a
boot loader, and an OS kernel that is TXT-aware. AMD DRTM implementation has
been present since the introduction of AMD-V but requires an additional
component that is AMD specific and referred to in the specification as the
Secure Loader, which the TrenchBoot project has an active prototype in
development. Finally Arm's implementation is in specification development stage
and the project is looking to support it when it becomes available.

This patchset provides detailed documentation of DRTM, the approach used for
adding the capability, and relevant API/ABI documentation. In addition to the
documentation the patch set introduces Intel TXT support as the first platform
for Linux Secure Launch.

A quick note on terminology. The larger open source project itself is called
TrenchBoot, which is hosted on Github (links below). The kernel feature enabling
the use of Dynamic Launch technology is referred to as "Secure Launch" within
the kernel code. As such the prefixes sl_/SL_ or slaunch/SLAUNCH will be seen
in the code. The stub code discussed above is referred to as the SL stub.

The Secure Launch feature starts with patch #2. Patch #1 was authored by Arvind
Sankar. There is no further status on this patch at this point but
Secure Launch depends on it so it is included with the set.

Links:

The TrenchBoot project including documentation:

https://trenchboot.org

The TrenchBoot project on Github:

https://github.com/trenchboot

Intel TXT is documented in its own specification and in the SDM Instruction Set 
volume:

https://www.intel.com/content/dam/www/public/us/en/documents/guides/intel-txt-software-development-guide.pdf
https://software.intel.com/en-us/articles/intel-sdm

AMD SKINIT is documented in the System Programming manual:

https://www.amd.com/system/files/TechDocs/24593.pdf

GRUB2 pre-launch support branch (WIP):

https://github.com/TrenchBoot/grub/tree/grub-sl-fc-38-dlstub

Thanks
Ross Philipson and Daniel P. Smith

Changes in v2:

 - Modified 32b entry code to prevent causing relocations in the compressed
   kernel.
 - Dropped patches for compressed kernel TPM PCR extender.
 - Modified event log code to insert log delimiter events and not rely
   on TPM access.
 - Stop extending PCRs in the early Secure Launch stub code.
 - Removed Kconfig options for hash algorithms and use the algorithms the
   ACM used.
 - Match Secure Launch measurement algorithm use to those reported in the
   TPM 2.0 event log.
 - Read the TPM events out of the TPM and extend them into the PCRs using
   the mainline TPM driver. This is done in the late initcall module.
 - Allow use of alternate PCR 19 and 20 for post ACM measurements.
 - Add Kconfig constraints needed by Secure Launch (disable KASLR
   and add x2apic dependency).
 - Fix testing of SL_FLAGS when determining if Secure Launch is active
   and the architecture is TXT.
 - Use SYM_DATA_START_LOCAL macros in early entry point code.
 - Security audit changes:
   - Validate buffers passed to MLE do not overlap the MLE and are
 properly laid out.
   - Validate buffers and memory regions used by the MLE are
 protected by IOMMU PMRs.
 - Force IOMMU to not use passthrough mode during a Secure Launch.
 - Prevent KASLR use during a Secure Launch.

Changes in v3:

 - Introduce x86 documentation patch to provide background, overview
   and configuration/ABI information for the Secure Launch kernel
   feature.
 - Remove the IOMMU patch with special cases for disabling IOMMU
   passthrough. Configuring the IOMMU is now a documentation matter
   in the previously mentioned new patch.
 - Remove special case KASLR disabling code. Configuring KASLR is now
   a documentation matter in the previously mentioned new patch.
 - Fix incorrect panic on TXT public register read.
 - Properly handle and measure setup_indirect bootparams in the early
   launch code.
 - Use correct compressed kernel image base address when testing buffers
   in the early launch stub code. This bug was introduced by the changes
   to avoid relocation in the compressed kernel.
 - Use CPUID feature bits instead of CPUID vendor strings to determine
   if SMX mode is supported and the system is Intel.
 - Remove early NMI re-enable on the BSP

[PATCH v6 08/14] x86: Secure Launch kernel late boot stub

2023-05-04 Thread Ross Philipson
The routine slaunch_setup is called out of the x86 specific setup_arch
routine during early kernel boot. After determining what platform is
present, various operations specific to that platform occur. This
includes finalizing settings for the platform's late launch and verifying
that memory protections are in place.

For TXT, this code also reserves the original compressed kernel setup
area where the APs were left looping so that this memory cannot be used.
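
One detail worth calling out: the TXT heap walked by this code is laid out as
consecutive tables, each beginning with a u64 giving that table's total size
(size field included). In outline, the walk is (a sketch; read_txt_reg() and
read_heap_u64() are hypothetical stand-ins for the memcpy_fromio() and
early_memremap() reads in the diff):

    u64 base = read_txt_reg(TXT_CR_HEAP_BASE);
    u64 offset = 0;
    int i;

    /* Skip "type" tables forward; each size field hops to the next. */
    for (i = 0; i < type; i++) {
            base += offset;
            offset = read_heap_u64(base);
    }
    /* The requested table's data starts just past its size field. */
    return base + sizeof(u64);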

Signed-off-by: Ross Philipson 
---
 arch/x86/kernel/Makefile   |   1 +
 arch/x86/kernel/setup.c|   3 +
 arch/x86/kernel/slaunch.c  | 497 +
 drivers/iommu/intel/dmar.c |   4 +
 4 files changed, 505 insertions(+)
 create mode 100644 arch/x86/kernel/slaunch.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index dd61752..3d2a33e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_X86_32)  += tls.o
 obj-$(CONFIG_IA32_EMULATION)   += tls.o
 obj-y  += step.o
 obj-$(CONFIG_INTEL_TXT)+= tboot.o
+obj-$(CONFIG_SECURE_LAUNCH)+= slaunch.o
 obj-$(CONFIG_ISA_DMA_API)  += i8237.o
 obj-y  += stacktrace.o
 obj-y  += cpu/
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16babff..592c09e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include <linux/slaunch.h>
 #include 
 #include 
 #include 
@@ -1063,6 +1064,8 @@ void __init setup_arch(char **cmdline_p)
early_gart_iommu_check();
 #endif
 
+   slaunch_setup_txt();
+
/*
 * partially used pages are not usable - thus
 * we are rounding upwards:
diff --git a/arch/x86/kernel/slaunch.c b/arch/x86/kernel/slaunch.c
new file mode 100644
index 000..7dba088
--- /dev/null
+++ b/arch/x86/kernel/slaunch.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Secure Launch late validation/setup and finalization support.
+ *
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static u32 sl_flags;
+static struct sl_ap_wake_info ap_wake_info;
+static u64 evtlog_addr;
+static u32 evtlog_size;
+static u64 vtd_pmr_lo_size;
+
+/* This should be plenty of room */
+static u8 txt_dmar[PAGE_SIZE] __aligned(16);
+
+u32 slaunch_get_flags(void)
+{
+   return sl_flags;
+}
+EXPORT_SYMBOL(slaunch_get_flags);
+
+struct sl_ap_wake_info *slaunch_get_ap_wake_info(void)
+{
+   return &ap_wake_info;
+}
+
+struct acpi_table_header *slaunch_get_dmar_table(struct acpi_table_header *dmar)
+{
+   /* The DMAR is only stashed and provided via TXT on Intel systems */
+   if (memcmp(txt_dmar, "DMAR", 4))
+   return dmar;
+
+   return (struct acpi_table_header *)(&txt_dmar[0]);
+}
+
+void __noreturn slaunch_txt_reset(void __iomem *txt,
+ const char *msg, u64 error)
+{
+   u64 one = 1, val;
+
+   pr_err("%s", msg);
+
+   /*
+* This performs a TXT reset with a sticky error code. The reads of
+* TXT_CR_E2STS act as barriers.
+*/
+   memcpy_toio(txt + TXT_CR_ERRORCODE, &error, sizeof(error));
+   memcpy_fromio(&val, txt + TXT_CR_E2STS, sizeof(val));
+   memcpy_toio(txt + TXT_CR_CMD_NO_SECRETS, &one, sizeof(one));
+   memcpy_fromio(&val, txt + TXT_CR_E2STS, sizeof(val));
+   memcpy_toio(txt + TXT_CR_CMD_UNLOCK_MEM_CONFIG, &one, sizeof(one));
+   memcpy_fromio(&val, txt + TXT_CR_E2STS, sizeof(val));
+   memcpy_toio(txt + TXT_CR_CMD_RESET, &one, sizeof(one));
+
+   for ( ; ; )
+   asm volatile ("hlt");
+
+   unreachable();
+}
+
+/*
+ * The TXT heap is too big to map all at once with early_ioremap
+ * so it is done a table at a time.
+ */
+static void __init *txt_early_get_heap_table(void __iomem *txt, u32 type,
+u32 bytes)
+{
+   u64 base, size, offset = 0;
+   void *heap;
+   int i;
+
+   if (type > TXT_SINIT_TABLE_MAX)
+   slaunch_txt_reset(txt,
+   "Error invalid table type for early heap walk\n",
+   SL_ERROR_HEAP_WALK);
+
+   memcpy_fromio(&base, txt + TXT_CR_HEAP_BASE, sizeof(base));
+   memcpy_fromio(&size, txt + TXT_CR_HEAP_SIZE, sizeof(size));
+
+   /* Iterate over heap tables looking for table of "type" */
+   for (i = 0; i < type; i++) {
+   base += offset;
+   heap = early_memremap(base, sizeof(u64));
+   if (!heap)
+   slaunch_txt_reset(txt,
+   "Error early_memremap of heap for heap walk\n",
+   SL_ERROR_HEAP_MAP);
+
+   offset = *((u64 

[PATCH 1/4] arm64: Cleanup _probe() return values

2023-05-04 Thread Jeremy Linton
The decision to process a compressed image should
be part of the image-specific detection logic. As such,
let's clarify the return logic and remove the is_zlib_file()
logic from the main file type detection loop.

Signed-off-by: Jeremy Linton 
---
 kexec/arch/arm64/kexec-arm64.c |  6 ++
 kexec/arch/arm64/kexec-elf-arm64.c |  1 +
 kexec/kexec.c  | 11 ---
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/kexec/arch/arm64/kexec-arm64.c b/kexec/arch/arm64/kexec-arm64.c
index ec6df4b..a36c103 100644
--- a/kexec/arch/arm64/kexec-arm64.c
+++ b/kexec/arch/arm64/kexec-arm64.c
@@ -70,6 +70,12 @@ const struct arch_map_entry arches[] = {
{ NULL, 0 },
 };
 
+/*
+ * All arm probe routines must return:
+ * -1 for invalid image
+ * 0 valid image in buffer
+ * >0 fd of valid image after decompression
+ */
 struct file_type file_type[] = {
{"vmlinux", elf_arm64_probe, elf_arm64_load, elf_arm64_usage},
{"Image", image_arm64_probe, image_arm64_load, image_arm64_usage},
diff --git a/kexec/arch/arm64/kexec-elf-arm64.c b/kexec/arch/arm64/kexec-elf-arm64.c
index e14f8e9..3b7e391 100644
--- a/kexec/arch/arm64/kexec-elf-arm64.c
+++ b/kexec/arch/arm64/kexec-elf-arm64.c
@@ -25,6 +25,7 @@ int elf_arm64_probe(const char *kernel_buf, off_t kernel_size)
 
if (result < 0) {
dbgprintf("%s: Not an ELF executable.\n", __func__);
+   result = -1;
goto on_exit;
}
 
diff --git a/kexec/kexec.c b/kexec/kexec.c
index 36bb2ad..97b7226 100644
--- a/kexec/kexec.c
+++ b/kexec/kexec.c
@@ -1303,14 +1303,11 @@ static int do_kexec_file_load(int fileind, int argc, char **argv,
for (i = 0; i < file_types; i++) {
 #ifdef __aarch64__
/* handle Image.gz like cases */
-   if (is_zlib_file(kernel, &kernel_size)) {
-   if ((ret = file_type[i].probe(kernel, kernel_size)) >= 0) {
+   if ((ret = file_type[i].probe(kernel_buf, kernel_size)) >= 0) {
+   if (ret > 0)
kernel_fd = ret;
-   break;
-   }
-   } else
-   if (file_type[i].probe(kernel_buf, kernel_size) >= 0)
-   break;
+   break;
+   }
 #else
if (file_type[i].probe(kernel_buf, kernel_size) >= 0)
break;
-- 
2.40.0




[PATCH 2/4] arm64: Add ZBOOT PE containing compressed image support

2023-05-04 Thread Jeremy Linton
The kernel EFI stub ZBOOT feature creates a PE that
contains a compressed Linux kernel image. The stub,
when run in a valid UEFI environment, decompresses
the contained image and executes it.

Support these image formats with kexec as well to avoid
having to keep an alternate kernel image around.

This patch adds the _probe() and _usage() routines needed
for kexec to understand this format.

Signed-off-by: Jeremy Linton 
---
 kexec/arch/arm64/image-header.h|  11 ++
 kexec/arch/arm64/kexec-vmlinuz-arm64.c | 172 +
 2 files changed, 183 insertions(+)
 create mode 100644 kexec/arch/arm64/kexec-vmlinuz-arm64.c

diff --git a/kexec/arch/arm64/image-header.h b/kexec/arch/arm64/image-header.h
index 158d411..5106b67 100644
--- a/kexec/arch/arm64/image-header.h
+++ b/kexec/arch/arm64/image-header.h
@@ -35,8 +35,19 @@ struct arm64_image_header {
uint32_t pe_header;
 };
 
+/* see drivers/firmware/efi/libstub/zboot-header.S */
+struct arm64_zboot_header {
+   uint32_t mz_magic;
+   uint32_t image_type;
+   uint32_t payload_offset;
+   uint32_t payload_size;
+   uint32_t reserved[2];
+   uint32_t compress_type;
+};
+
 static const uint8_t arm64_image_magic[4] = {'A', 'R', 'M', 0x64U};
 static const uint8_t arm64_image_pe_sig[2] = {'M', 'Z'};
+static const uint8_t arm64_pe_machtype[6] = {'P','E', 0x0, 0x0, 0x64, 0xAA};
 static const uint64_t arm64_image_flag_be = (1UL << 0);
 static const uint64_t arm64_image_flag_page_size = (3UL << 1);
 static const uint64_t arm64_image_flag_placement = (1UL << 3);
diff --git a/kexec/arch/arm64/kexec-vmlinuz-arm64.c b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
new file mode 100644
index 000..7033e2e
--- /dev/null
+++ b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
@@ -0,0 +1,172 @@
+/*
+ * ARM64 PE compressed Image (vmlinuz, ZBOOT) support.
+ *
+ * Several distros use 'make zinstall' rule inside
+ * 'arch/arm64/boot/Makefile' to install the arm64
+ * ZBOOT compressed file inside the boot destination
+ * directory (for e.g. /boot).
+ *
+ * Currently we cannot use kexec_file_load() to load vmlinuz
+ * PE images that self decompress.
+ *
+ * To support ZBOOT, we should:
+ * a). Copy the compressed contents of vmlinuz to a temporary file.
+ * b). Decompress (gunzip-decompress) the contents inside the
+ * temporary file.
+ * c). Validate the resulting image and write it back to the
+ * temporary file.
+ * d). Pass the 'fd' of the temporary file to the kernel space.
+ *
+ * Note this, module doesn't provide a _load() function instead
+ * relying on image_arm64_load() to load the resulting decompressed
+ * image.
+ *
+ * So basically the kernel space still gets a decompressed
+ * kernel image to load via kexec-tools.
+ */
+
+#define _GNU_SOURCE
+
+#include 
+#include 
+#include 
+#include 
+#include "crashdump-arm64.h"
+#include "image-header.h"
+#include "kexec.h"
+#include "kexec-arm64.h"
+#include "kexec-syscall.h"
+#include "kexec-zlib.h"
+#include "arch/options.h"
+
+#define FILENAME_IMAGE "/tmp/ImageXXXXXX"
+
+/* Returns:
+ * -1 : in case of error/invalid format (not a valid PE+compressed ZBOOT format).
+ * fd : File descriptor of the temp file containing the decompressed
+ * Image.
+ */
+int pez_arm64_probe(const char *kernel_buf, off_t kernel_size)
+{
+   int ret = -1;
+   int fd = 0;
+   int kernel_fd = 0;
+   char *fname = NULL;
+   char *kernel_uncompressed_buf = NULL;
+   off_t decompressed_size = 0;
+   const struct arm64_image_header *h;
+   const struct arm64_zboot_header *z;
+   h = (const struct arm64_image_header *)(kernel_buf);
+   z = (const struct arm64_zboot_header *)(kernel_buf);
+
+   dbgprintf("%s: PROBE.\n", __func__);
+   if (kernel_size < sizeof(struct arm64_image_header)) {
+   dbgprintf("%s: Not large enough to be a PE image.\n", __func__);
+   return -1;
+   }
+   if (!arm64_header_check_pe_sig(h)) {
+   dbgprintf("%s: Not an PE image.\n", __func__);
+   return -1;
+   }
+
+   if (kernel_size < sizeof(struct arm64_image_header) + h->pe_header) {
+   dbgprintf("%s: PE image offset larger than image.\n", __func__);
+   return -1;
+   }
+
+   if (memcmp(&kernel_buf[h->pe_header],
+  arm64_pe_machtype, sizeof(arm64_pe_machtype))) {
+   dbgprintf("%s: PE header doesn't match machine type.\n", 
__func__);
+   return -1;
+   }
+
+   if (memcmp(&z->image_type, "zimg", sizeof(z->image_type))) {
+   dbgprintf("%s: PE doesn't contain a compressed kernel.\n", 
__func__);
+   return -1;
+   }
+
+   if (memcmp(&z->compress_type, "gzip", 4) &&
+   memcmp(&z->compress_type, "lzma", 4)) {
+   dbgprintf("%s: kexec can only decompress gziped and lzma 
images.\n", __func__);
+   return -1;
+   }
+
+   if (kernel_size < z->payload_offset +

[PATCH 3/4] arm64: Hook up the ZBOOT support as vmlinuz

2023-05-04 Thread Jeremy Linton
Add the previously defined _probe() and _usage() routines
to the kexec file types table, and build the new module.

It should be noted that this "vmlinuz" support reuses the
"Image" support to actually load the resulting image after
it has been decompressed to a temporary file.

Signed-off-by: Jeremy Linton 
---
 kexec/arch/arm64/Makefile  | 3 ++-
 kexec/arch/arm64/kexec-arm64.c | 1 +
 kexec/arch/arm64/kexec-arm64.h | 3 +++
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/kexec/arch/arm64/Makefile b/kexec/arch/arm64/Makefile
index d27c8ee..900f246 100644
--- a/kexec/arch/arm64/Makefile
+++ b/kexec/arch/arm64/Makefile
@@ -16,7 +16,8 @@ arm64_KEXEC_SRCS += \
kexec/arch/arm64/kexec-elf-arm64.c \
kexec/arch/arm64/kexec-uImage-arm64.c \
kexec/arch/arm64/kexec-image-arm64.c \
-   kexec/arch/arm64/kexec-zImage-arm64.c
+   kexec/arch/arm64/kexec-zImage-arm64.c \
+   kexec/arch/arm64/kexec-vmlinuz-arm64.c
 
 arm64_UIMAGE = kexec/kexec-uImage.c
 
diff --git a/kexec/arch/arm64/kexec-arm64.c b/kexec/arch/arm64/kexec-arm64.c
index a36c103..3da4c1a 100644
--- a/kexec/arch/arm64/kexec-arm64.c
+++ b/kexec/arch/arm64/kexec-arm64.c
@@ -81,6 +81,7 @@ struct file_type file_type[] = {
{"Image", image_arm64_probe, image_arm64_load, image_arm64_usage},
{"uImage", uImage_arm64_probe, uImage_arm64_load, uImage_arm64_usage},
{"zImage", zImage_arm64_probe, zImage_arm64_load, zImage_arm64_usage},
+   {"vmlinuz", pez_arm64_probe, image_arm64_load, pez_arm64_usage},
 };
 
 int file_types = sizeof(file_type) / sizeof(file_type[0]);
diff --git a/kexec/arch/arm64/kexec-arm64.h b/kexec/arch/arm64/kexec-arm64.h
index 5eb9fc0..20d39a6 100644
--- a/kexec/arch/arm64/kexec-arm64.h
+++ b/kexec/arch/arm64/kexec-arm64.h
@@ -49,6 +49,9 @@ int zImage_arm64_load(int argc, char **argv, const char *kernel_buf,
off_t kernel_size, struct kexec_info *info);
 void zImage_arm64_usage(void);
 
+int pez_arm64_probe(const char *kernel_buf, off_t kernel_size);
+void pez_arm64_usage(void);
+
 
 extern off_t initrd_base;
 extern off_t initrd_size;
-- 
2.40.0




[PATCH 0/4] Support kexec'ing PEs containing compressed kernels

2023-05-04 Thread Jeremy Linton
The linux ZBOOT option creates PEs that contain compressed kernel images
which are self decompressed on execution by UEFI.

This set adds support for this image format to kexec by decompressing the
contained kernel image to a temp file, then handing the resulting image
off to the existing "Image" load routine to pass to the kexec syscall.

There is also an additional patch which cleans up some errors noticed
in the existing zImage support as well.

Jeremy Linton (4):
  arm64: Cleanup _probe() return values
  arm64: Add ZBOOT PE containing compressed image support
  arm64: Hook up the ZBOOT support as vmlinuz
  arm64: Fix some issues with zImage _probe()

 kexec/arch/arm64/Makefile  |   3 +-
 kexec/arch/arm64/image-header.h|  11 ++
 kexec/arch/arm64/kexec-arm64.c |   7 +
 kexec/arch/arm64/kexec-arm64.h |   3 +
 kexec/arch/arm64/kexec-elf-arm64.c |   1 +
 kexec/arch/arm64/kexec-vmlinuz-arm64.c | 172 +
 kexec/arch/arm64/kexec-zImage-arm64.c  |  13 +-
 kexec/kexec.c  |  11 +-
 8 files changed, 201 insertions(+), 20 deletions(-)
 create mode 100644 kexec/arch/arm64/kexec-vmlinuz-arm64.c

-- 
2.40.0




[PATCH 4/4] arm64: Fix some issues with zImage _probe()

2023-05-04 Thread Jeremy Linton
Current compilers warn that fname will be NULL when the
error path attempts to print it after strdup() fails.

Further fix a memory leak caused by kernel_uncompressed_buf
never being used/freed before the allocated block is replaced
by the one returned by slurp_decompress_file().

Signed-off-by: Jeremy Linton 
---
 kexec/arch/arm64/kexec-zImage-arm64.c | 13 +
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/kexec/arch/arm64/kexec-zImage-arm64.c 
b/kexec/arch/arm64/kexec-zImage-arm64.c
index 6ee82ff..3eb1ad8 100644
--- a/kexec/arch/arm64/kexec-zImage-arm64.c
+++ b/kexec/arch/arm64/kexec-zImage-arm64.c
@@ -55,8 +55,7 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size)
}
 
if (!(fname = strdup(FILENAME_IMAGE))) {
-   dbgprintf("%s: Can't duplicate strings %s\n", __func__,
-   fname);
+   dbgprintf("%s: Can't duplicate strings\n", __func__);
return -1;
}
 
@@ -67,15 +66,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size)
goto fail_mkstemp;
}
 
-   kernel_uncompressed_buf =
-   (char *) calloc(kernel_size, sizeof(off_t));
-   if (!kernel_uncompressed_buf) {
-   dbgprintf("%s: Can't calloc %ld bytes\n",
-   __func__, kernel_size);
-   ret= -ENOMEM;
-   goto fail_calloc;
-   }
-
/* slurp in the input kernel */
dbgprintf("%s: ", __func__);
kernel_uncompressed_buf = slurp_decompress_file(kernel_buf,
@@ -128,7 +118,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size)
 fail_bad_header:
free(kernel_uncompressed_buf);
 
-fail_calloc:
if (fd >= 0)
close(fd);
 
-- 
2.40.0




Re: [PATCH 0/4] Support kexec'ing PEs containing compressed kernels

2023-05-04 Thread Ard Biesheuvel
On Thu, 4 May 2023 at 18:41, Jeremy Linton  wrote:
>
> The linux ZBOOT option creates PEs that contain compressed kernel images
> which are self decompressed on execution by UEFI.
>
> This set adds support for this image format to kexec by decompressing the
> contained kernel image to a temp file, then handing the resulting image
> off to the existing "Image" load routine to pass to the kexec syscall.
>
> There is also an additional patch which cleans up some errors noticed
> in the existing zImage support as well.
>
> Jeremy Linton (4):
>   arm64: Cleanup _probe() return values
>   arm64: Add ZBOOT PE containing compressed image support
>   arm64: Hook up the ZBOOT support as vmlinuz
>   arm64: Fix some issues with zImage _probe()
>

Thanks a lot for taking care of this!

This all looks good to me. The only comment I have is that EFI zboot
itself is generic, even though arm64 is the only arch that distros are
building it for at the moment. So it is not unlikely that some of this
code will end up needing to be shared.

Acked-by: Ard Biesheuvel 


>  kexec/arch/arm64/Makefile  |   3 +-
>  kexec/arch/arm64/image-header.h|  11 ++
>  kexec/arch/arm64/kexec-arm64.c |   7 +
>  kexec/arch/arm64/kexec-arm64.h |   3 +
>  kexec/arch/arm64/kexec-elf-arm64.c |   1 +
>  kexec/arch/arm64/kexec-vmlinuz-arm64.c | 172 +
>  kexec/arch/arm64/kexec-zImage-arm64.c  |  13 +-
>  kexec/kexec.c  |  11 +-
>  8 files changed, 201 insertions(+), 20 deletions(-)
>  create mode 100644 kexec/arch/arm64/kexec-vmlinuz-arm64.c
>
> --
> 2.40.0
>



[PATCH 0/5] arm64: zboot support

2023-05-04 Thread Pingfan Liu
More complicated capsule kernel formats such as zboot are appearing,
where the compressed kernel is stored as a payload; the straightforward
decompression can no longer meet the demand.

As a first step, on aarch64, read in the kernel file in each probe
method and let the method itself decide how to unfold the content.

The newly designed probe interface returns two things:
1. the parsed kernel_buf, so that it can be used by the image load
method later.
2. the final fd passed to sys_kexec_file_load; since the aarch64 kernel
can only boot the Image format, the outer payload is stripped and a
temporary Image file is created.
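
A minimal sketch of a probe following that contract (assuming the
kernel_fd and kernel_buf members this series adds to struct kexec_info,
plus kexec-tools' existing slurp_file() helper; the probe name is
hypothetical):

	#include <fcntl.h>
	#include "kexec.h"	/* struct kexec_info, slurp_file() */

	int example_probe(const char *kern_fname, off_t len,
			  struct kexec_info *info)
	{
		char *kernel_buf = slurp_file(kern_fname, &len);

		if (!kernel_buf)
			return -1;
		/* ... format-specific checks on kernel_buf go here ... */

		info->kernel_fd = open(kern_fname, O_RDONLY);	/* for sys_kexec_file_load */
		info->kernel_buf = kernel_buf;	/* reused by the image load method */
		return 0;
	}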


To: kexec@lists.infradead.org
Cc: ho...@verge.net.au
Cc: a...@kernel.org
Cc: jeremy.lin...@arm.com

Pingfan Liu (5):
  kexec: Adding missing free for kernel_buf
  arm64/zImage: Remove unnecessary allocation for
kernel_uncompressed_buf
  arm64: change the prototype of image probe function
  arm64: Scatter the reading of kernel file into each probe
  arm64: add support for zboot image

 kexec/arch/arm/kexec-arm.h|   4 +-
 kexec/arch/arm/kexec-uImage-arm.c |   2 +-
 kexec/arch/arm64/Makefile |   3 +-
 kexec/arch/arm64/kexec-arm64.c|   1 +
 kexec/arch/arm64/kexec-arm64.h|  13 +-
 kexec/arch/arm64/kexec-elf-arm64.c|   7 +-
 kexec/arch/arm64/kexec-image-arm64.c  |   6 +-
 kexec/arch/arm64/kexec-uImage-arm64.c |  17 +-
 kexec/arch/arm64/kexec-zImage-arm64.c |  23 +--
 kexec/arch/arm64/kexec-zboot-arm64.c  | 261 ++
 kexec/arch/arm64/zboot.h  |  26 +++
 kexec/kexec.c |  48 +++--
 kexec/kexec.h |   8 +
 13 files changed, 377 insertions(+), 42 deletions(-)
 create mode 100644 kexec/arch/arm64/kexec-zboot-arm64.c
 create mode 100644 kexec/arch/arm64/zboot.h

-- 
2.31.1




[PATCH 1/5] kexec: Adding missing free for kernel_buf

2023-05-04 Thread Pingfan Liu
slurp_decompress_file() allocates memory, but it is never freed.
Add the missing free.

Signed-off-by: Pingfan Liu 
To: kexec@lists.infradead.org
Cc: ho...@verge.net.au
Cc: a...@kernel.org
Cc: jeremy.lin...@arm.com
---
 kexec/kexec.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kexec/kexec.c b/kexec/kexec.c
index 36bb2ad..614cd1d 100644
--- a/kexec/kexec.c
+++ b/kexec/kexec.c
@@ -1379,6 +1379,7 @@ static int do_kexec_file_load(int fileind, int argc, char 
**argv,
}
}
 
+   free(kernel_buf);
close(kernel_fd);
return ret;
 }
-- 
2.31.1




[PATCH 3/5] arm64: change the prototype of image probe function

2023-05-04 Thread Pingfan Liu
Changing the aarch64 probe's prototype from
typedef int (probe_t)(const char *kernel_buf, off_t kernel_size);
to
typedef int (probe_t)(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info);

Later, info can be used to return both the file descriptor and the
parsed kernel buffer. The fd is passed to sys_kexec_file_load, and the
parsed kernel buffer is used by the image's load function.
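
For illustration, the generic probe loop in kexec/kexec.c can then hand
info to each candidate probe, roughly like this (a sketch, not the
exact hunk from this patch):

	for (i = 0; i < file_types; i++)
		if (file_type[i].probe(kernel, kernel_size, &info) >= 0)
			break;

	if (i == file_types)
		die("Cannot determine the file type of %s\n", kernel);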

Signed-off-by: Pingfan Liu 
To: kexec@lists.infradead.org
Cc: ho...@verge.net.au
Cc: a...@kernel.org
Cc: jeremy.lin...@arm.com

---
 kexec/arch/arm/kexec-arm.h|  4 ++--
 kexec/arch/arm/kexec-uImage-arm.c |  2 +-
 kexec/arch/arm64/kexec-arm64.h|  8 
 kexec/arch/arm64/kexec-elf-arm64.c|  2 +-
 kexec/arch/arm64/kexec-image-arm64.c  |  2 +-
 kexec/arch/arm64/kexec-uImage-arm64.c |  2 +-
 kexec/arch/arm64/kexec-zImage-arm64.c |  2 +-
 kexec/kexec.c | 15 +--
 kexec/kexec.h |  6 ++
 9 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/kexec/arch/arm/kexec-arm.h b/kexec/arch/arm/kexec-arm.h
index a74cce2..18069f3 100644
--- a/kexec/arch/arm/kexec-arm.h
+++ b/kexec/arch/arm/kexec-arm.h
@@ -9,12 +9,12 @@
 
 extern off_t initrd_base, initrd_size;
 
-int zImage_arm_probe(const char *buf, off_t len);
+int zImage_arm_probe(const char *buf, off_t len, struct kexec_info *info);
 int zImage_arm_load(int argc, char **argv, const char *buf, off_t len,
struct kexec_info *info);
 void zImage_arm_usage(void);
 
-int uImage_arm_probe(const char *buf, off_t len);
+int uImage_arm_probe(const char *buf, off_t len, struct kexec_info *info);
 int uImage_arm_load(int argc, char **argv, const char *buf, off_t len,
struct kexec_info *info);
 extern int have_sysfs_fdt(void);
diff --git a/kexec/arch/arm/kexec-uImage-arm.c 
b/kexec/arch/arm/kexec-uImage-arm.c
index 03c2f4d..d955eb3 100644
--- a/kexec/arch/arm/kexec-uImage-arm.c
+++ b/kexec/arch/arm/kexec-uImage-arm.c
@@ -9,7 +9,7 @@
 #include "../../kexec.h"
 #include "kexec-arm.h"
 
-int uImage_arm_probe(const char *buf, off_t len)
+int uImage_arm_probe(const char *buf, off_t len, struct kexec_info *info)
 {
return uImage_probe_kernel(buf, len, IH_ARCH_ARM);
 }
diff --git a/kexec/arch/arm64/kexec-arm64.h b/kexec/arch/arm64/kexec-arm64.h
index 5eb9fc0..88bb508 100644
--- a/kexec/arch/arm64/kexec-arm64.h
+++ b/kexec/arch/arm64/kexec-arm64.h
@@ -29,22 +29,22 @@
 #define NOT_KV_ADDR(0x0)
 #define NOT_PADDR  (ULONGLONG_MAX)
 
-int elf_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int elf_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info);
 int elf_arm64_load(int argc, char **argv, const char *kernel_buf,
off_t kernel_size, struct kexec_info *info);
 void elf_arm64_usage(void);
 
-int image_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int image_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info);
 int image_arm64_load(int argc, char **argv, const char *kernel_buf,
off_t kernel_size, struct kexec_info *info);
 void image_arm64_usage(void);
 
-int uImage_arm64_probe(const char *buf, off_t len);
+int uImage_arm64_probe(const char *buf, off_t len, struct kexec_info *info);
 int uImage_arm64_load(int argc, char **argv, const char *buf, off_t len,
  struct kexec_info *info);
 void uImage_arm64_usage(void);
 
-int zImage_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int zImage_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info);
 int zImage_arm64_load(int argc, char **argv, const char *kernel_buf,
off_t kernel_size, struct kexec_info *info);
 void zImage_arm64_usage(void);
diff --git a/kexec/arch/arm64/kexec-elf-arm64.c 
b/kexec/arch/arm64/kexec-elf-arm64.c
index e14f8e9..9238dd5 100644
--- a/kexec/arch/arm64/kexec-elf-arm64.c
+++ b/kexec/arch/arm64/kexec-elf-arm64.c
@@ -16,7 +16,7 @@
 #include "kexec-elf.h"
 #include "kexec-syscall.h"
 
-int elf_arm64_probe(const char *kernel_buf, off_t kernel_size)
+int elf_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info)
 {
struct mem_ehdr ehdr;
int result;
diff --git a/kexec/arch/arm64/kexec-image-arm64.c 
b/kexec/arch/arm64/kexec-image-arm64.c
index aa8f2e2..84aca72 100644
--- a/kexec/arch/arm64/kexec-image-arm64.c
+++ b/kexec/arch/arm64/kexec-image-arm64.c
@@ -14,7 +14,7 @@
 #include "kexec-syscall.h"
 #include "arch/options.h"
 
-int image_arm64_probe(const char *kernel_buf, off_t kernel_size)
+int image_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info)
 {
const struct arm64_image_header *h;
 
diff --git a/kexec/arch/arm64/kexec-uImage-arm64.c 
b/kexec/arch/arm64/kexec-uImage-arm64.c
index c466913..f5b94c8 100644
--- a/kexec/arch/arm64/kexec-uImage-arm64.c
+++ b/kexec/arch/arm64/kexec-uImage-arm64.c
@@ -9,7 +9,7 @@
 #include "../../kex


[PATCH 4/5] arm64: Scatter the reading of kernel file into each probe

2023-05-04 Thread Pingfan Liu
More complicated capsule kernel formats such as zboot are appearing,
where the compressed kernel is stored as a payload; the straightforward
decompression can no longer meet the demand.

So read in the kernel file in each probe method and let the method
itself decide how to unfold the content.

Signed-off-by: Pingfan Liu 
To: kexec@lists.infradead.org
Cc: ho...@verge.net.au
Cc: a...@kernel.org
Cc: jeremy.lin...@arm.com

---
 kexec/arch/arm64/kexec-elf-arm64.c|  7 -
 kexec/arch/arm64/kexec-image-arm64.c  |  6 +++-
 kexec/arch/arm64/kexec-uImage-arm64.c | 17 ---
 kexec/arch/arm64/kexec-zImage-arm64.c | 13 -
 kexec/kexec.c | 42 ++-
 kexec/kexec.h |  2 ++
 6 files changed, 60 insertions(+), 27 deletions(-)

diff --git a/kexec/arch/arm64/kexec-elf-arm64.c 
b/kexec/arch/arm64/kexec-elf-arm64.c
index 9238dd5..cedf3c6 100644
--- a/kexec/arch/arm64/kexec-elf-arm64.c
+++ b/kexec/arch/arm64/kexec-elf-arm64.c
@@ -16,11 +16,13 @@
 #include "kexec-elf.h"
 #include "kexec-syscall.h"
 
-int elf_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info)
+int elf_arm64_probe(const char *kern_fname, off_t kernel_size, struct 
kexec_info *info)
 {
struct mem_ehdr ehdr;
+   char *kernel_buf;
int result;
 
+   kernel_buf = slurp_file(kern_fname, &kernel_size);
result = build_elf_exec_info(kernel_buf, kernel_size, &ehdr, 0);
 
if (result < 0) {
@@ -34,8 +36,11 @@ int elf_arm64_probe(const char *kernel_buf, off_t 
kernel_size, struct kexec_info
goto on_exit;
}
 
+   info->kernel_fd = open(kern_fname, O_RDONLY);
+   info->kernel_buf = kernel_buf;
result = 0;
 on_exit:
+   free(kernel_buf);
free_elf_info(&ehdr);
return result;
 }
diff --git a/kexec/arch/arm64/kexec-image-arm64.c 
b/kexec/arch/arm64/kexec-image-arm64.c
index 84aca72..90cd40a 100644
--- a/kexec/arch/arm64/kexec-image-arm64.c
+++ b/kexec/arch/arm64/kexec-image-arm64.c
@@ -14,10 +14,12 @@
 #include "kexec-syscall.h"
 #include "arch/options.h"
 
-int image_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info)
+int image_arm64_probe(const char *kern_fname, off_t kernel_size, struct 
kexec_info *info)
 {
const struct arm64_image_header *h;
+   char *kernel_buf;
 
+   kernel_buf = slurp_file(kern_fname, &kernel_size);
if (kernel_size < sizeof(struct arm64_image_header)) {
dbgprintf("%s: No arm64 image header.\n", __func__);
return -1;
@@ -29,6 +31,8 @@ int image_arm64_probe(const char *kernel_buf, off_t 
kernel_size, struct kexec_in
dbgprintf("%s: Bad arm64 image header.\n", __func__);
return -1;
}
+   info->kernel_fd = open(kern_fname, O_RDONLY);
+   info->kernel_buf = kernel_buf;
 
return 0;
 }
diff --git a/kexec/arch/arm64/kexec-uImage-arm64.c 
b/kexec/arch/arm64/kexec-uImage-arm64.c
index f5b94c8..cce1c76 100644
--- a/kexec/arch/arm64/kexec-uImage-arm64.c
+++ b/kexec/arch/arm64/kexec-uImage-arm64.c
@@ -3,26 +3,35 @@
  */
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include "../../kexec.h"
 #include "kexec-arm64.h"
 
-int uImage_arm64_probe(const char *buf, off_t len, struct kexec_info *info)
+int uImage_arm64_probe(const char *kern_fname, off_t len, struct kexec_info 
*info)
 {
int ret;
+   char *kernel_buf;
 
-   ret = uImage_probe_kernel(buf, len, IH_ARCH_ARM64);
+   kernel_buf = slurp_file(kern_fname, &len);
+   ret = uImage_probe_kernel(kernel_buf, len, IH_ARCH_ARM64);
 
/*  0 - valid uImage.
 * -1 - uImage is corrupted.
 *  1 - image is not a uImage.
 */
-   if (!ret)
+   if (!ret) {
+   info->kernel_fd = open(kern_fname, O_RDONLY);
+   info->kernel_buf = kernel_buf;
return 0;
-   else
+   }
+   else {
+   free(kernel_buf);
return -1;
+   }
 }
 
 int uImage_arm64_load(int argc, char **argv, const char *buf, off_t len,
diff --git a/kexec/arch/arm64/kexec-zImage-arm64.c 
b/kexec/arch/arm64/kexec-zImage-arm64.c
index 7877741..450a915 100644
--- a/kexec/arch/arm64/kexec-zImage-arm64.c
+++ b/kexec/arch/arm64/kexec-zImage-arm64.c
@@ -40,7 +40,7 @@
  * fd : File descriptor of the temp file containing the decompressed
  *  Image.
  */
-int zImage_arm64_probe(const char *kernel_buf, off_t kernel_size, struct 
kexec_info *info)
+int zImage_arm64_probe(const char *kernel, off_t kernel_size, struct 
kexec_info *info)
 {
int ret = -1;
int fd = 0;
@@ -49,7 +49,7 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size, struct kexec_i
char *kernel_uncompressed_buf = NULL;
const struct arm64_image_header *h;
 
-   if (!is_zlib_file(kernel_buf, &kernel_size)) {
+   if (!is_zlib_file(kernel, &kernel_size)) {
 

[PATCH 2/5] arm64/zImage: Remove unnecessary allocation for kernel_uncompressed_buf

2023-05-04 Thread Pingfan Liu
Since slurp_decompress_file() allocates the buffer that holds the
content read from the file, there is no need to allocate the buffer
again.

Signed-off-by: Pingfan Liu 
To: kexec@lists.infradead.org
Cc: ho...@verge.net.au
Cc: a...@kernel.org
Cc: jeremy.lin...@arm.com

---
 kexec/arch/arm64/kexec-zImage-arm64.c | 10 --
 1 file changed, 10 deletions(-)

diff --git a/kexec/arch/arm64/kexec-zImage-arm64.c 
b/kexec/arch/arm64/kexec-zImage-arm64.c
index 6ee82ff..166d7ef 100644
--- a/kexec/arch/arm64/kexec-zImage-arm64.c
+++ b/kexec/arch/arm64/kexec-zImage-arm64.c
@@ -67,15 +67,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size)
goto fail_mkstemp;
}
 
-   kernel_uncompressed_buf =
-   (char *) calloc(kernel_size, sizeof(off_t));
-   if (!kernel_uncompressed_buf) {
-   dbgprintf("%s: Can't calloc %ld bytes\n",
-   __func__, kernel_size);
-   ret= -ENOMEM;
-   goto fail_calloc;
-   }
-
/* slurp in the input kernel */
dbgprintf("%s: ", __func__);
kernel_uncompressed_buf = slurp_decompress_file(kernel_buf,
@@ -128,7 +119,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
kernel_size)
 fail_bad_header:
free(kernel_uncompressed_buf);
 
-fail_calloc:
if (fd >= 0)
close(fd);
 
-- 
2.31.1




Re: [PATCH 0/4] Support kexec'ing PEs containing compressed kernels

2023-05-04 Thread Pingfan Liu
Hi Jeremy,

Thanks for sharing this. When I received your series, mine was close to
finished, so I am just posting it.

My 2/5 is identical to your 4/4, and can be dropped. My 5/5 is similar
to your 2/4, and can also be dropped.

The main difference is the image probe method. Since the introduction
of the zboot format, it is no longer appropriate to call
slurp_decompress_file() once before attempting the various possible
probes. So my series scatters slurp_decompress_file() into each probe,
and each probe returns two things: the kernel fd, which is used by
sys_kexec_file_load, and the parsed kernel buf, which is used later by
the image's load function.

On Fri, May 5, 2023 at 12:44 AM Jeremy Linton  wrote:
>
> The linux ZBOOT option creates PEs that contain compressed kernel images
> which are self decompressed on execution by UEFI.
>
> This set adds support for this image format to kexec by decompressing the
> contained kernel image to a temp file, then handing the resulting image
> off to the existing "Image" load routine to pass to the kexec syscall.
>
> There is also an additional patch which cleans up some errors noticed
> in the existing zImage support as well.
>
> Jeremy Linton (4):
>   arm64: Cleanup _probe() return values
>   arm64: Add ZBOOT PE containing compressed image support
>   arm64: Hook up the ZBOOT support as vmlinuz
>   arm64: Fix some issues with zImage _probe()
>
>  kexec/arch/arm64/Makefile  |   3 +-
>  kexec/arch/arm64/image-header.h|  11 ++
>  kexec/arch/arm64/kexec-arm64.c |   7 +
>  kexec/arch/arm64/kexec-arm64.h |   3 +
>  kexec/arch/arm64/kexec-elf-arm64.c |   1 +
>  kexec/arch/arm64/kexec-vmlinuz-arm64.c | 172 +
>  kexec/arch/arm64/kexec-zImage-arm64.c  |  13 +-
>  kexec/kexec.c  |  11 +-
>  8 files changed, 201 insertions(+), 20 deletions(-)
>  create mode 100644 kexec/arch/arm64/kexec-vmlinuz-arm64.c
>
> --
> 2.40.0
>
>
>




Re: [PATCH 4/4] arm64: Fix some issues with zImage _probe()

2023-05-04 Thread Pingfan Liu
On Fri, May 5, 2023 at 12:44 AM Jeremy Linton  wrote:
>
> Current compilers warn that fname will be NULL when it is
> used to print strdup() failures.
>
> Further, fix a memory leak: kernel_uncompressed_buf is never
> used or freed before the allocated block is replaced by the
> one returned by slurp_decompress_file().
>
> Signed-off-by: Jeremy Linton 
> ---
>  kexec/arch/arm64/kexec-zImage-arm64.c | 13 +
>  1 file changed, 1 insertion(+), 12 deletions(-)
>
> diff --git a/kexec/arch/arm64/kexec-zImage-arm64.c 
> b/kexec/arch/arm64/kexec-zImage-arm64.c
> index 6ee82ff..3eb1ad8 100644
> --- a/kexec/arch/arm64/kexec-zImage-arm64.c
> +++ b/kexec/arch/arm64/kexec-zImage-arm64.c
> @@ -55,8 +55,7 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
> kernel_size)
> }
>
> if (!(fname = strdup(FILENAME_IMAGE))) {
> -   dbgprintf("%s: Can't duplicate strings %s\n", __func__,
> -   fname);
> +   dbgprintf("%s: Can't duplicate strings\n", __func__);
> return -1;
> }
>
> @@ -67,15 +66,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
> kernel_size)
> goto fail_mkstemp;
> }
>
> -   kernel_uncompressed_buf =
> -   (char *) calloc(kernel_size, sizeof(off_t));
> -   if (!kernel_uncompressed_buf) {
> -   dbgprintf("%s: Can't calloc %ld bytes\n",
> -   __func__, kernel_size);
> -   ret= -ENOMEM;
> -   goto fail_calloc;
> -   }
> -
> /* slurp in the input kernel */
> dbgprintf("%s: ", __func__);
> kernel_uncompressed_buf = slurp_decompress_file(kernel_buf,
> @@ -128,7 +118,6 @@ int zImage_arm64_probe(const char *kernel_buf, off_t 
> kernel_size)
>  fail_bad_header:
> free(kernel_uncompressed_buf);
>
> -fail_calloc:
> if (fd >= 0)
> close(fd);
>
> --
> 2.40.0
>
Reviewed-by: Pingfan Liu 




Re: [PATCH 2/4] arm64: Add ZBOOT PE containing compressed image support

2023-05-04 Thread Pingfan Liu
On Fri, May 5, 2023 at 12:44 AM Jeremy Linton  wrote:
>
> The kernel EFI stub ZBOOT feature creates a PE that
> contains a compressed Linux kernel image. The stub,
> when run in a valid UEFI environment, decompresses
> the contained image and executes it.
>
> Support these image formats with kexec as well to avoid
> having to keep an alternate kernel image around.
>
> This patch adds the _probe() and _usage() routines needed
> for kexec to understand this format.
>
> Signed-off-by: Jeremy Linton 
> ---
>  kexec/arch/arm64/image-header.h|  11 ++
>  kexec/arch/arm64/kexec-vmlinuz-arm64.c | 172 +
>  2 files changed, 183 insertions(+)
>  create mode 100644 kexec/arch/arm64/kexec-vmlinuz-arm64.c
>
> diff --git a/kexec/arch/arm64/image-header.h b/kexec/arch/arm64/image-header.h
> index 158d411..5106b67 100644
> --- a/kexec/arch/arm64/image-header.h
> +++ b/kexec/arch/arm64/image-header.h
> @@ -35,8 +35,19 @@ struct arm64_image_header {
> uint32_t pe_header;
>  };
>
> +/* see drivers/firmware/efi/libstub/zboot-header.S */
> +struct arm64_zboot_header {
> +   uint32_t mz_magic;
> +   uint32_t image_type;
> +   uint32_t payload_offset;
> +   uint32_t payload_size;
> +   uint32_t reserved[2];
> +   uint32_t compress_type;
> +};
> +
>  static const uint8_t arm64_image_magic[4] = {'A', 'R', 'M', 0x64U};
>  static const uint8_t arm64_image_pe_sig[2] = {'M', 'Z'};
> +static const uint8_t arm64_pe_machtype[6] = {'P','E', 0x0, 0x0, 0x64, 0xAA};
>  static const uint64_t arm64_image_flag_be = (1UL << 0);
>  static const uint64_t arm64_image_flag_page_size = (3UL << 1);
>  static const uint64_t arm64_image_flag_placement = (1UL << 3);
> diff --git a/kexec/arch/arm64/kexec-vmlinuz-arm64.c 
> b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
> new file mode 100644
> index 000..7033e2e
> --- /dev/null
> +++ b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
> @@ -0,0 +1,172 @@
> +/*
> + * ARM64 PE compressed Image (vmlinuz, ZBOOT) support.
> + *
> + * Several distros use 'make zinstall' rule inside
> + * 'arch/arm64/boot/Makefile' to install the arm64
> + * ZBOOT compressed file inside the boot destination
> + * directory (for e.g. /boot).
> + *
> + * Currently we cannot use kexec_file_load() to load vmlinuz
> + * PE images that self decompress.
> + *
> + * To support ZBOOT, we should:
> + * a). Copy the compressed contents of vmlinuz to a temporary file.
> + * b). Decompress (gunzip-decompress) the contents inside the
> + * temporary file.
> + * c). Validate the resulting image and write it back to the
> + * temporary file.
> + * d). Pass the 'fd' of the temporary file to the kernel space.
> + *
> + * Note: this module doesn't provide a _load() function, instead
> + * relying on image_arm64_load() to load the resulting decompressed
> + * image.
> + *
> + * So basically the kernel space still gets a decompressed
> + * kernel image to load via kexec-tools.
> + */
> +
> +#define _GNU_SOURCE
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include "crashdump-arm64.h"
> +#include "image-header.h"
> +#include "kexec.h"
> +#include "kexec-arm64.h"
> +#include "kexec-syscall.h"
> +#include "kexec-zlib.h"
> +#include "arch/options.h"
> +
> +#define FILENAME_IMAGE "/tmp/ImageXX"
> +
> +/* Returns:
> + * -1 : in case of error/invalid format (not a valid PE+compressed ZBOOT 
> format.
> + * fd : File descriptor of the temp file containing the decompressed
> + * Image.
> + */
> +int pez_arm64_probe(const char *kernel_buf, off_t kernel_size)
> +{
> +   int ret = -1;
> +   int fd = 0;
> +   int kernel_fd = 0;
> +   char *fname = NULL;
> +   char *kernel_uncompressed_buf = NULL;
> +   off_t decompressed_size = 0;
> +   const struct arm64_image_header *h;
> +   const struct arm64_zboot_header *z;
> +   h = (const struct arm64_image_header *)(kernel_buf);
> +   z = (const struct arm64_zboot_header *)(kernel_buf);
> +
> +   dbgprintf("%s: PROBE.\n", __func__);
> +   if (kernel_size < sizeof(struct arm64_image_header)) {
> +   dbgprintf("%s: Not large enough to be a PE image.\n", 
> __func__);
> +   return -1;
> +   }
> +   if (!arm64_header_check_pe_sig(h)) {
> +   dbgprintf("%s: Not an PE image.\n", __func__);
> +   return -1;
> +   }
> +
> +   if (kernel_size < sizeof(struct arm64_image_header) + h->pe_header) {
> +   dbgprintf("%s: PE image offset larger than image.\n", 
> __func__);
> +   return -1;
> +   }
> +
> +   if (memcmp(&kernel_buf[h->pe_header],
> +  arm64_pe_machtype, sizeof(arm64_pe_machtype))) {
> +   dbgprintf("%s: PE header doesn't match machine type.\n", 
> __func__);
> +   return -1;
> +   }
> +
> +   if (memcmp(&z->image_type, "zimg", sizeof(z->image_type))) {
> +   dbgprintf("%s: PE doesn't contain a compressed kernel.\n", 
> __func_