[tip:x86/microcode] x86/microcode/intel: Improve microcode patches saving flow

2017-08-29 Thread tip-bot for Borislav Petkov
Commit-ID:  aa78c1ccfab6018289bc2bfd0092d516d0a49ec5
Gitweb: http://git.kernel.org/tip/aa78c1ccfab6018289bc2bfd0092d516d0a49ec5
Author: Borislav Petkov 
AuthorDate: Fri, 25 Aug 2017 12:04:56 +0200
Committer:  Thomas Gleixner 
CommitDate: Tue, 29 Aug 2017 10:59:28 +0200

x86/microcode/intel: Improve microcode patches saving flow

Avoid potentially dereferencing a NULL pointer when saving a microcode
patch for early loading on the application processors.

While at it, drop the IS_ERR() checking in favor of simpler, NULL-ptr
checks which are sufficient and rename __alloc_microcode_buf() to
memdup_patch() to more precisely denote what it does.

No functionality change.
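A minimal sketch of why the plain pointer test was not enough while the helper still
returned ERR_PTR() values, and why a NULL return makes it sufficient (illustration
only, reusing the names from the patch below):

	/*
	 * Sketch, not part of the patch: ERR_PTR(-ENOMEM) is a non-NULL
	 * encoded error value, so a final "if (p)" guard does not catch a
	 * failed allocation and p->data can still be dereferenced.
	 */
	p = __alloc_microcode_buf(data, size);	/* old: may return ERR_PTR(-ENOMEM) */
	if (IS_ERR(p))
		pr_err("Error allocating buffer %p\n", data);

	if (p)					/* passes for an error pointer too */
		intel_ucode_patch = p->data;	/* dereferences an invalid pointer */

	/* New flow: NULL on failure, bail out before touching p->data. */
	p = memdup_patch(data, size);
	if (!p)
		return;
	intel_ucode_patch = p->data;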

Reported-by: Dan Carpenter 
Signed-off-by: Borislav Petkov 
Signed-off-by: Thomas Gleixner 
Cc: kernel-janit...@vger.kernel.org
Link: http://lkml.kernel.org/r/20170825100456.n236w3jebteok...@pd.tnic

---
 arch/x86/kernel/cpu/microcode/intel.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 59edbe9..8f7a9bb 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -146,18 +146,18 @@ static bool microcode_matches(struct microcode_header_intel *mc_header,
 	return false;
 }
 
-static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
+static struct ucode_patch *memdup_patch(void *data, unsigned int size)
 {
 	struct ucode_patch *p;
 
 	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
 	if (!p)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	p->data = kmemdup(data, size, GFP_KERNEL);
 	if (!p->data) {
 		kfree(p);
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}
 
 	return p;
@@ -183,8 +183,8 @@ static void save_microcode_patch(void *data, unsigned int size)
 			if (mc_hdr->rev <= mc_saved_hdr->rev)
 				continue;
 
-			p = __alloc_microcode_buf(data, size);
-			if (IS_ERR(p))
+			p = memdup_patch(data, size);
+			if (!p)
 				pr_err("Error allocating buffer %p\n", data);
 			else
 				list_replace(&iter->plist, &p->plist);
@@ -196,24 +196,25 @@ static void save_microcode_patch(void *data, unsigned int size)
 	 * newly found.
 	 */
 	if (!prev_found) {
-		p = __alloc_microcode_buf(data, size);
-		if (IS_ERR(p))
+		p = memdup_patch(data, size);
+		if (!p)
 			pr_err("Error allocating buffer for %p\n", data);
 		else
 			list_add_tail(&p->plist, &microcode_cache);
 	}
 
+	if (!p)
+		return;
+
 	/*
 	 * Save for early loading. On 32-bit, that needs to be a physical
 	 * address as the APs are running from physical addresses, before
 	 * paging has been enabled.
 	 */
-	if (p) {
-		if (IS_ENABLED(CONFIG_X86_32))
-			intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
-		else
-			intel_ucode_patch = p->data;
-	}
+	if (IS_ENABLED(CONFIG_X86_32))
+		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+	else
+		intel_ucode_patch = p->data;
 }
 
 static int microcode_sanity_check(void *mc, int print_err)
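
For readability, the hunks above reassemble into roughly the following post-patch
code (unchanged parts of save_microcode_patch() are elided; this is taken from the
diff context above, not an independent implementation):

	/* Resulting helper: duplicate a microcode blob into a new ucode_patch. */
	static struct ucode_patch *memdup_patch(void *data, unsigned int size)
	{
		struct ucode_patch *p;

		p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
		if (!p)
			return NULL;

		p->data = kmemdup(data, size, GFP_KERNEL);
		if (!p->data) {
			kfree(p);
			return NULL;
		}

		return p;
	}

	static void save_microcode_patch(void *data, unsigned int size)
	{
		/* ... scan microcode_cache, possibly replacing an older patch ... */

		if (!prev_found) {
			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer for %p\n", data);
			else
				list_add_tail(&p->plist, &microcode_cache);
		}

		/* Bail out early instead of dereferencing a bad pointer below. */
		if (!p)
			return;

		/*
		 * Save for early loading. On 32-bit, that needs to be a physical
		 * address as the APs are running from physical addresses, before
		 * paging has been enabled.
		 */
		if (IS_ENABLED(CONFIG_X86_32))
			intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
		else
			intel_ucode_patch = p->data;
	}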

