On Tue, Dec 5, 2017 at 12:51 AM, Chao Fan <fanc.f...@cn.fujitsu.com> wrote:
> In the current code, kaslr may choose a memory region in a movable
> node to extract the kernel, which prevents that node from being
> hot-removed. To solve this, let the user specify memory regions in
> immovable nodes. Create immovable_mem to store those regions, which
> are the ones kaslr should choose from.
>
> Multiple regions can be specified, comma delimited. To limit memory
> usage, only 4 regions are supported. 4 regions can cover at least 2
> nodes, which is enough for extracting the kernel.
>
> Also rename "handle_mem_memmap" to "handle_mem_filter", since it now
> handles more than just the memmap parameter.
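
(For reference, the format parsed below is size[KMG]@offset[KMG], comma
separated and capped at 4 entries, so a command line might carry
something like the purely illustrative

    immovable_mem=4G@4G,4G@16G

to restrict KASLR to two 4G regions starting at 4G and 16G.)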

I would put all immovable region functions and variables behind
#ifdefs; e.g., parse_immovable_mem() is unused without
CONFIG_MEMORY_HOTPLUG. I also wonder if it might be possible to reuse
some of the parsing code and just pass in which array you're updating
(i.e. memmap array vs immovable array).
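
Something along these lines, purely as an untested sketch: one walker
for a comma-separated list that stores into whichever mem_vector array
the caller passes in. The helper name and the parse callback are
illustrative, not from this patch, and the memmap= side would still
need its memmap_too_large bookkeeping on top of it:

static int parse_region_list(char *str, struct mem_vector *regions,
                             int max_regions,
                             int (*parse)(char *, unsigned long long *,
                                          unsigned long long *))
{
        int i = 0;

        while (str && i < max_regions) {
                unsigned long long start, size;
                char *k = strchr(str, ',');

                /* Terminate this entry and remember where the next one starts. */
                if (k)
                        *k++ = 0;

                /* The caller picks the per-entry syntax (memmap vs immovable). */
                if (parse(str, &start, &size) < 0)
                        break;
                str = k;

                regions[i].start = start;
                regions[i].size = size;
                i++;
        }

        /* Number of entries actually filled in. */
        return i;
}

parse_immovable_mem_regions() would then collapse to something like
num_immovable_region = parse_region_list(str, immovable_mem,
MAX_IMMOVABLE_MEM, parse_immovable_mem), and it could still live behind
CONFIG_MEMORY_HOTPLUG.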

-Kees

>
> Signed-off-by: Chao Fan <fanc.f...@cn.fujitsu.com>
> ---
>  arch/x86/boot/compressed/kaslr.c | 80 ++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 77 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
> index a63fbc25ce84..0bbbaf5f6370 100644
> --- a/arch/x86/boot/compressed/kaslr.c
> +++ b/arch/x86/boot/compressed/kaslr.c
> @@ -108,6 +108,15 @@ enum mem_avoid_index {
>
>  static struct mem_vector mem_avoid[MEM_AVOID_MAX];
>
> +/* Only supporting at most 4 immovable memory regions with kaslr */
> +#define MAX_IMMOVABLE_MEM      4
> +
> +/* Store the memory regions in immovable nodes */
> +static struct mem_vector immovable_mem[MAX_IMMOVABLE_MEM];
> +
> +/* Number of immovable regions the user specified, at most 4 */
> +static int num_immovable_region;
> +
>  static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
>  {
>         /* Item one is entirely before item two. */
> @@ -168,6 +177,38 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
>         return -EINVAL;
>  }
>
> +static int parse_immovable_mem(char *p,
> +                              unsigned long long *start,
> +                              unsigned long long *size)
> +{
> +       char *oldp;
> +
> +       if (!p)
> +               return -EINVAL;
> +
> +       oldp = p;
> +       *size = memparse(p, &p);
> +       if (p == oldp)
> +               return -EINVAL;
> +
> +       /* We support nn[KMG]@ss[KMG] and nn[KMG]. */
> +       switch (*p) {
> +       case '@':
> +               *start = memparse(p + 1, &p);
> +               return 0;
> +       default:
> +               /*
> +                * If no offset is given (only a size), immovable_mem=nn[KMG]
> +                * behaves the same as immovable_mem=nn[KMG]@0, i.e. the
> +                * region starts at 0.
> +                */
> +               *start = 0;
> +               return 0;
> +       }
> +
> +       return -EINVAL;
> +}
> +
>  static void mem_avoid_memmap(char *str)
>  {
>         static int i;
> @@ -207,7 +248,37 @@ static void mem_avoid_memmap(char *str)
>                 memmap_too_large = true;
>  }
>
> -static int handle_mem_memmap(void)
> +#ifdef CONFIG_MEMORY_HOTPLUG
> +static void parse_immovable_mem_regions(char *str)
> +{
> +       static int i;
> +
> +       while (str && (i < MAX_IMMOVABLE_MEM)) {
> +               int rc;
> +               unsigned long long start, size;
> +               char *k = strchr(str, ',');
> +
> +               if (k)
> +                       *k++ = 0;
> +
> +               rc = parse_immovable_mem(str, &start, &size);
> +               if (rc < 0)
> +                       break;
> +               str = k;
> +
> +               immovable_mem[i].start = start;
> +               immovable_mem[i].size = size;
> +               i++;
> +       }
> +       num_immovable_region = i;
> +}
> +#else
> +static inline void parse_immovable_mem_regions(char *str)
> +{
> +}
> +#endif
> +
> +static int handle_mem_filter(void)
>  {
>         char *args = (char *)get_cmd_line_ptr();
>         size_t len = strlen((char *)args);
> @@ -215,7 +286,8 @@ static int handle_mem_memmap(void)
>         char *param, *val;
>         u64 mem_size;
>
> -       if (!strstr(args, "memmap=") && !strstr(args, "mem="))
> +       if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
> +           !strstr(args, "immovable_mem="))
>                 return 0;
>
>         tmp_cmdline = malloc(len + 1);
> @@ -240,6 +312,8 @@ static int handle_mem_memmap(void)
>
>                 if (!strcmp(param, "memmap")) {
>                         mem_avoid_memmap(val);
> +               } else if (!strcmp(param, "immovable_mem")) {
> +                       parse_immovable_mem_regions(val);
>                 } else if (!strcmp(param, "mem")) {
>                         char *p = val;
>
> @@ -379,7 +453,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
>         /* We don't need to set a mapping for setup_data. */
>
>         /* Mark the memmap regions we need to avoid */
> -       handle_mem_memmap();
> +       handle_mem_filter();
>
>  #ifdef CONFIG_X86_VERBOSE_BOOTUP
>         /* Make sure video RAM can be used. */
> --
> 2.14.3

-- 
Kees Cook
Pixel Security
