Re: [PATCH 3/3] x86/microcode: Quiesce all threads before a microcode update.

2018-02-22 Thread Tom Lendacky
On 2/21/2018 2:13 PM, Raj, Ashok wrote:
> On Wed, Feb 21, 2018 at 08:06:11PM +0100, Borislav Petkov wrote:
>>>  arch/x86/kernel/cpu/microcode/core.c  | 113 +-
>>
>> This is generic so Tom needs to ack whatever we end up doing for the AMD
>> side.
> 
> Yes, i did ping Tom to check if this is ok with them.

I did some testing with these patches and didn't notice any issues on my
EPYC system.  At the moment I don't have access to anything older on
which to test.  But I don't believe there should be any issues
with this approach.  I'll retest when we get closer to the final version
of the patch.

Thanks,
Tom

> >>
>>>  arch/x86/kernel/cpu/microcode/intel.c |   1 +
>>>  2 files changed, 98 insertions(+), 16 deletions(-)
>>>
>>> diff --git a/arch/x86/kernel/cpu/microcode/core.c 
>>> b/arch/x86/kernel/cpu/microcode/core.c
>>> index aa1b9a4..af0aeb2 100644
>>> --- a/arch/x86/kernel/cpu/microcode/core.c
>>> +++ b/arch/x86/kernel/cpu/microcode/core.c
>>> @@ -31,6 +31,9 @@
>>>  #include 
>>>  #include 
>>>  #include 
>>> +#include 
>>> +#include 
>>> +#include 
>>>  
>>>  #include 
>>>  #include 
>>> @@ -489,19 +492,82 @@ static void __exit microcode_dev_exit(void)
>>>  /* fake device for request_firmware */
>>>  static struct platform_device  *microcode_pdev;
>>>  
>>> -static enum ucode_state reload_for_cpu(int cpu)
>>> +static struct ucode_update_param {
>>> +   spinlock_t ucode_lock;
>>> +   atomic_t   count;
>>> +   atomic_t   errors;
>>> +   atomic_t   enter;
>>> +   inttimeout;
>>> +} uc_data;
>>> +
>>> +static void do_ucode_update(int cpu, struct ucode_update_param *ucd)
>>>  {
>>> -   struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
>>> -   enum ucode_state ustate;
>>> +   enum ucode_state retval = 0;
>>>  
>>> -   if (!uci->valid)
>>> -   return UCODE_OK;
>>> +   spin_lock(&ucd->ucode_lock);
>>> +   retval = microcode_ops->apply_microcode(cpu);
>>> +   spin_unlock(&ucd->ucode_lock);
>>
>> What's the spinlock protecting against?
> 
> This is ensuring no 2 cpus do ucode update at the same time.
> 
> Since all cpus wait for all the online cpus to arrive in stop_machine handler.
> Once we let go, every cpu tries to update. This just serializes against that.
> 
>>
>> We hold the hotplug lock and the microcode mutex. And yet interrupts are
>> still enabled. So what's up?
> 
> hotplug lock/microcode mutex are at global level, these are 
> protecting individual cpus in stop machine trying to update microcode.
> 
> these are called while in stop_machine() so I think interrupts are disabled,
> IIRC.
> 
>>
>>
>>> +   if (retval > UCODE_NFOUND) {
>>> +   atomic_inc(&ucd->errors);
>>
>> You don't need ->errors. Simply propagate retval from do_ucode_update().
>> Or compare ucd->count to the number of CPUs. Or something like that.
> 
> That's what we are doing here, but simply returning number of cpus
> that encountered failure instead of a per-cpu retval
> like before.
> 
> I use ucd->count as an exit rendezvous, to make sure we leave only
> after all cpus have done updating ucode.
> 
>>> +   pr_warn("microcode update to cpu %d failed\n", cpu);
>>> +   }
>>> +   atomic_inc(&ucd->count);
>>> +}
>>> +
>>> +/*
>>> + * Wait for upto 1sec for all cpus
>>> + * to show up in the rendezvous function
>>> + */
>>> +#define MAX_UCODE_RENDEZVOUS   10 /* nanosec */
>>
>>  1 * NSEC_PER_SEC
>>
>>> +#define SPINUNIT   100/* 100ns */
>>> +
>>> +/*
>>> + * Each cpu waits for 1sec max.
>>> + */
>>> +static int ucode_wait_timedout(int *time_out, void *data)
>>> +{
>>> +   struct ucode_update_param *ucd = data;
>>> +   if (*time_out < SPINUNIT) {
>>> +   pr_err("Not all cpus entered ucode update handler %d cpus missing\n",
>>> +   (num_online_cpus() - atomic_read(&ucd->enter)));
>>> +   return 1;
>>> +   }
>>> +   *time_out -= SPINUNIT;
>>> +   touch_nmi_watchdog();
>>> +   return 0;
>>> +}
>>> +
>>> +/*
>>> + * All cpus enter here before a ucode load upto 1 sec.
>>> + * If not all cpus showed up, we abort the ucode update
>>> + * and return. ucode update is serialized with the spinlock
>>
>> ... and yet you don't check stop_machine()'s retval and issue an error
>> message that it failed.
>>
> 
> Will add that 
> 
>>> + */
>>> +static int ucode_load_rendezvous(void *data)
>>
>> The correct prefix is "microcode_"
>>
>>> +{
>>> +   int cpu = smp_processor_id();
>>> +   struct ucode_update_param *ucd = data;
>>> +   int timeout = MAX_UCODE_RENDEZVOUS;
>>> +   int total_cpus = num_online_cpus();
>>>  
>>> -   ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
>>> -   if (ustate != UCODE_OK)
>>> -   return ustate;
>>> +   /*
>>> +* Wait for all cpu's to arrive
>>> +*/
>>> +   atomic_dec(&ucd->enter);
>>> +   while(atomic_read(&ucd->enter)) {
>>> +   if (ucode_wait_timedout(&timeout, ucd))
>>> +   return 1;
>>> +   ndelay(SPINUNIT);
>>> 

Re: [PATCH 3/3] x86/microcode: Quiesce all threads before a microcode update.

2018-02-21 Thread Borislav Petkov
On Wed, Feb 21, 2018 at 12:13:08PM -0800, Raj, Ashok wrote:
> This is ensuring no 2 cpus do ucode update at the same time.

And that is a problem?

We don't do any of that mutual exclusion for early loading. Why isn't it
a problem there?

> That's what we are doing here, but simply returning number of cpus
> that encountered failure instead of a per-cpu retval
> like before.

You still don't need ->errors.

> When we online any of the offline cpu's we do a microcode load again right?

That doesn't make any sense. First you say:

"the Intel microcode team asked us to make sure the system is in a quiet
state during these updates."

When you've updated the microcode on a subset of the cores and then the
other cores come up and you do that in the hotplug notifier, the system
is far from quiet. On the contrary, it is busy bootstrapping cores.

Which makes me wonder if that "quiet" argument even means anything.

Because if you wanna do that in the notifiers, you can just as well
offline all the cores but the BSP, update the microcode there and then
online the rest again ---> no need for that patch at all.

> Not sure what you mean by jumping through hoops need to be extracted away.. 

Take that code:

+   memset(&uc_data, 0, sizeof(struct ucode_update_param));
+   spin_lock_init(&uc_data.ucode_lock);
+   atomic_set(&uc_data.enter, num_online_cpus());
+   /*
+* Wait for a 1 sec
+*/
+   uc_data.timeout = USEC_PER_SEC;
+   stop_machine(ucode_load_rendezvous, &uc_data, cpu_online_mask);
+
+   pr_debug("Total CPUS = %d uperrors = %d\n",
+   atomic_read(&uc_data.count), atomic_read(&uc_data.errors));
+
+   if (atomic_read(&uc_data.errors))

and put it in a separate function which you call in reload_store().
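
For illustration, a helper along those lines might look roughly like this. This
is only a sketch, not the actual patch: the function name and error codes are
made up here, it reuses the uc_data/ucode_load_rendezvous pieces from the
quoted diff, and it also checks stop_machine()'s return value as asked for
earlier in the thread:

static int microcode_load_stop_machine(void)
{
	int ret;

	memset(&uc_data, 0, sizeof(uc_data));
	spin_lock_init(&uc_data.ucode_lock);
	atomic_set(&uc_data.enter, num_online_cpus());
	uc_data.timeout = USEC_PER_SEC;

	/* Rendezvous all online CPUs and apply the update under stop_machine(). */
	ret = stop_machine(ucode_load_rendezvous, &uc_data, cpu_online_mask);
	if (ret) {
		pr_err("microcode: stop_machine() rendezvous failed: %d\n", ret);
		return ret;
	}

	/* Any CPU that failed to apply the update bumped ->errors. */
	if (atomic_read(&uc_data.errors)) {
		pr_err("microcode: update failed on %d CPUs\n",
		       atomic_read(&uc_data.errors));
		return -EIO;
	}

	return 0;
}

reload_store() would then just call this helper once the firmware request has
succeeded, instead of open-coding the setup there.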

-- 
Regards/Gruss,
Boris.

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 
(AG Nürnberg)
-- 


Re: [PATCH 3/3] x86/microcode: Quiesce all threads before a microcode update.

2018-02-21 Thread Raj, Ashok
On Wed, Feb 21, 2018 at 08:06:11PM +0100, Borislav Petkov wrote:
> >  arch/x86/kernel/cpu/microcode/core.c  | 113 +-
> 
> This is generic so Tom needs to ack whatever we end up doing for the AMD
> side.

Yes, i did ping Tom to check if this is ok with them.

> 
> >  arch/x86/kernel/cpu/microcode/intel.c |   1 +
> >  2 files changed, 98 insertions(+), 16 deletions(-)
> > 
> > diff --git a/arch/x86/kernel/cpu/microcode/core.c 
> > b/arch/x86/kernel/cpu/microcode/core.c
> > index aa1b9a4..af0aeb2 100644
> > --- a/arch/x86/kernel/cpu/microcode/core.c
> > +++ b/arch/x86/kernel/cpu/microcode/core.c
> > @@ -31,6 +31,9 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> > +#include 
> > +#include 
> >  
> >  #include 
> >  #include 
> > @@ -489,19 +492,82 @@ static void __exit microcode_dev_exit(void)
> >  /* fake device for request_firmware */
> >  static struct platform_device  *microcode_pdev;
> >  
> > -static enum ucode_state reload_for_cpu(int cpu)
> > +static struct ucode_update_param {
> > +   spinlock_t ucode_lock;
> > +   atomic_t   count;
> > +   atomic_t   errors;
> > +   atomic_t   enter;
> > +   inttimeout;
> > +} uc_data;
> > +
> > +static void do_ucode_update(int cpu, struct ucode_update_param *ucd)
> >  {
> > -   struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
> > -   enum ucode_state ustate;
> > +   enum ucode_state retval = 0;
> >  
> > -   if (!uci->valid)
> > -   return UCODE_OK;
> > +   spin_lock(&ucd->ucode_lock);
> > +   retval = microcode_ops->apply_microcode(cpu);
> > +   spin_unlock(&ucd->ucode_lock);
> 
> What's the spinlock protecting against?

This is ensuring no 2 cpus do ucode update at the same time.

Since all cpus wait for all the online cpus to arrive in stop_machine handler.
Once we let go, every cpu tries to update. This just serializes against that.

> 
> We hold the hotplug lock and the microcode mutex. And yet interrupts are
> still enabled. So what's up?

hotplug lock/microcode mutex are at global level, these are 
protecting individual cpus in stop machine trying to update microcode.

these are called while in stop_machine() so I think interrupts are disabled, IIRC.

> 
> 
> > +   if (retval > UCODE_NFOUND) {
> > +   atomic_inc(&ucd->errors);
> 
> You don't need ->errors. Simply propagate retval from do_ucode_update().
> Or compare ucd->count to the number of CPUs. Or something like that.

That's what we are doing here, but simply returning number of cpus
that encountered failure instead of a per-cpu retval
like before.

I use ucd->count as an exit rendezvous, to make sure we leave only
after all cpus have done updating ucode.
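
For readers unfamiliar with the pattern, here is a minimal user-space sketch of
the same two-phase rendezvous (entry barrier, serialized update, exit counter)
using C11 atomics and pthreads. The thread count, the plain mutex and the names
are purely illustrative; this is not the kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int nr_enter = NTHREADS;		/* entry rendezvous */
static atomic_int nr_done;			/* exit rendezvous */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	long id = (long)arg;

	/* Phase 1: wait until every thread has arrived. */
	atomic_fetch_sub(&nr_enter, 1);
	while (atomic_load(&nr_enter))
		;	/* the kernel code bounds this spin with a timeout */

	/* Phase 2: do the "update", one thread at a time. */
	pthread_mutex_lock(&update_lock);
	printf("thread %ld: applying update\n", id);
	pthread_mutex_unlock(&update_lock);
	atomic_fetch_add(&nr_done, 1);

	/* Phase 3: nobody leaves until every thread has finished. */
	while (atomic_load(&nr_done) != NTHREADS)
		;
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

The kernel version in the patch additionally bounds the entry spin with a
timeout and touches the NMI watchdog while waiting.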

> > +   pr_warn("microcode update to cpu %d failed\n", cpu);
> > +   }
> > +   atomic_inc(&ucd->count);
> > +}
> > +
> > +/*
> > + * Wait for upto 1sec for all cpus
> > + * to show up in the rendezvous function
> > + */
> > +#define MAX_UCODE_RENDEZVOUS   10 /* nanosec */
> 
>   1 * NSEC_PER_SEC
> 
> > +#define SPINUNIT   100/* 100ns */
> > +
> > +/*
> > + * Each cpu waits for 1sec max.
> > + */
> > +static int ucode_wait_timedout(int *time_out, void *data)
> > +{
> > +   struct ucode_update_param *ucd = data;
> > +   if (*time_out < SPINUNIT) {
> > +   pr_err("Not all cpus entered ucode update handler %d cpus missing\n",
> > +   (num_online_cpus() - atomic_read(&ucd->enter)));
> > +   return 1;
> > +   }
> > +   *time_out -= SPINUNIT;
> > +   touch_nmi_watchdog();
> > +   return 0;
> > +}
> > +
> > +/*
> > + * All cpus enter here before a ucode load upto 1 sec.
> > + * If not all cpus showed up, we abort the ucode update
> > + * and return. ucode update is serialized with the spinlock
> 
> ... and yet you don't check stop_machine()'s retval and issue an error
> message that it failed.
> 

Will add that 

> > + */
> > +static int ucode_load_rendezvous(void *data)
> 
> The correct prefix is "microcode_"
> 
> > +{
> > +   int cpu = smp_processor_id();
> > +   struct ucode_update_param *ucd = data;
> > +   int timeout = MAX_UCODE_RENDEZVOUS;
> > +   int total_cpus = num_online_cpus();
> >  
> > -   ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
> > -   if (ustate != UCODE_OK)
> > -   return ustate;
> > +   /*
> > +* Wait for all cpu's to arrive
> > +*/
> > +   atomic_dec(&ucd->enter);
> > +   while(atomic_read(&ucd->enter)) {
> > +   if (ucode_wait_timedout(&timeout, ucd))
> > +   return 1;
> > +   ndelay(SPINUNIT);
> > +   }
> > +
> > +   do_ucode_update(cpu, ucd);
> >  
> > -   return apply_microcode_on_target(cpu);
> > +   /*
> > +* Wait for all cpu's to complete
> > +* ucode update
> > +*/
> > +   while (atomic_read(&ucd->count) != total_cpus)
> > +   cpu_relax();
> > +   return 0;
> >  }
> >  
> >  static ssize_t reload_store(struct device *dev,
> > @@ -509,7 +575,6 @@ static ssize_t reload_store(struct device 

Re: [PATCH 3/3] x86/microcode: Quiesce all threads before a microcode update.

2018-02-21 Thread Borislav Petkov
On Wed, Feb 21, 2018 at 08:49:44AM -0800, Ashok Raj wrote:
> Microcode updates during OS load always assumed the other hyperthread
> was "quiet", but Linux never really did this. We've recently received
> several issues on this, where things did not go well at scale
> deployments, and the Intel microcode team asked us to make sure the
> system is in a quiet state during these updates. Such updates are
> rare events, so we use stop_machine() to ensure the whole system is
> quiet.

Ewww, where do I begin?!

I really really hoped that we could avoid nasty dancing like that.

> Signed-off-by: Ashok Raj 
> Cc: X86 ML 
> Cc: LKML 
> Cc: Tom Lendacky 
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: Tony Luck 
> Cc: Andi Kleen 
> Cc: Boris Petkov 
> Cc: Arjan Van De Ven 
> ---
>  arch/x86/kernel/cpu/microcode/core.c  | 113 +-

This is generic so Tom needs to ack whatever we end up doing for the AMD
side.

>  arch/x86/kernel/cpu/microcode/intel.c |   1 +
>  2 files changed, 98 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/microcode/core.c 
> b/arch/x86/kernel/cpu/microcode/core.c
> index aa1b9a4..af0aeb2 100644
> --- a/arch/x86/kernel/cpu/microcode/core.c
> +++ b/arch/x86/kernel/cpu/microcode/core.c
> @@ -31,6 +31,9 @@
>  #include 
>  #include 
>  #include 
> +#include 
> +#include 
> +#include 
>  
>  #include 
>  #include 
> @@ -489,19 +492,82 @@ static void __exit microcode_dev_exit(void)
>  /* fake device for request_firmware */
>  static struct platform_device *microcode_pdev;
>  
> -static enum ucode_state reload_for_cpu(int cpu)
> +static struct ucode_update_param {
> + spinlock_t ucode_lock;
> + atomic_t   count;
> + atomic_t   errors;
> + atomic_t   enter;
> + inttimeout;
> +} uc_data;
> +
> +static void do_ucode_update(int cpu, struct ucode_update_param *ucd)
>  {
> - struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
> - enum ucode_state ustate;
> + enum ucode_state retval = 0;
>  
> - if (!uci->valid)
> - return UCODE_OK;
> + spin_lock(&ucd->ucode_lock);
> + retval = microcode_ops->apply_microcode(cpu);
> + spin_unlock(&ucd->ucode_lock);

What's the spinlock protecting against?

We hold the hotplug lock and the microcode mutex. And yet interrupts are
still enabled. So what's up?


> + if (retval > UCODE_NFOUND) {
> + atomic_inc(&ucd->errors);

You don't need ->errors. Simply propagate retval from do_ucode_update().
Or compare ucd->count to the number of CPUs. Or something like that.
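
As an illustration only (not a suggested final form; the names follow the
quoted patch), propagating the result could look roughly like this:

/* Hypothetical sketch: return the result instead of bumping ->errors. */
static enum ucode_state do_ucode_update(int cpu, struct ucode_update_param *ucd)
{
	enum ucode_state ret;

	/* Serialize the actual update, as in the patch. */
	spin_lock(&ucd->ucode_lock);
	ret = microcode_ops->apply_microcode(cpu);
	spin_unlock(&ucd->ucode_lock);

	if (ret > UCODE_NFOUND)
		pr_warn("microcode update to cpu %d failed\n", cpu);

	/* Still count arrivals for the exit rendezvous. */
	atomic_inc(&ucd->count);
	return ret;
}

The rendezvous callback could then return nonzero on failure so stop_machine()
reports it to the caller, or alternatively ucd->count could be bumped only on
success and compared against num_online_cpus() -- either way ->errors becomes
unnecessary.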

> + pr_warn("microcode update to cpu %d failed\n", cpu);
> + }
> + atomic_inc(&ucd->count);
> +}
> +
> +/*
> + * Wait for upto 1sec for all cpus
> + * to show up in the rendezvous function
> + */
> +#define MAX_UCODE_RENDEZVOUS 10 /* nanosec */

1 * NSEC_PER_SEC
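
i.e. roughly the following, with NSEC_PER_SEC coming from the kernel's time
headers (the macro names here are only illustrative):

#define MICROCODE_RENDEZVOUS_TIMEOUT_NS	(1 * NSEC_PER_SEC)	/* 1 second */
#define SPINUNIT_NS			100			/* poll every 100 ns */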

> +#define SPINUNIT 100/* 100ns */
> +
> +/*
> + * Each cpu waits for 1sec max.
> + */
> +static int ucode_wait_timedout(int *time_out, void *data)
> +{
> + struct ucode_update_param *ucd = data;
> + if (*time_out < SPINUNIT) {
> + pr_err("Not all cpus entered ucode update handler %d cpus missing\n",
> + (num_online_cpus() - atomic_read(&ucd->enter)));
> + return 1;
> + }
> + *time_out -= SPINUNIT;
> + touch_nmi_watchdog();
> + return 0;
> +}
> +
> +/*
> + * All cpus enter here before a ucode load upto 1 sec.
> + * If not all cpus showed up, we abort the ucode update
> + * and return. ucode update is serialized with the spinlock

... and yet you don't check stop_machine()'s retval and issue an error
message that it failed.

> + */
> +static int ucode_load_rendezvous(void *data)

The correct prefix is "microcode_"

> +{
> + int cpu = smp_processor_id();
> + struct ucode_update_param *ucd = data;
> + int timeout = MAX_UCODE_RENDEZVOUS;
> + int total_cpus = num_online_cpus();
>  
> - ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
> - if (ustate != UCODE_OK)
> - return ustate;
> + /*
> +  * Wait for all cpu's to arrive
> +  */
> + atomic_dec(&ucd->enter);
> + while(atomic_read(&ucd->enter)) {
> + if (ucode_wait_timedout(&timeout, ucd))
> + return 1;
> + ndelay(SPINUNIT);
> + }
> +
> + do_ucode_update(cpu, ucd);
>  
> - return apply_microcode_on_target(cpu);
> + /*
> +  * Wait for all cpu's to complete
> +  * ucode update
> +  */
> + while (atomic_read(&ucd->count) != total_cpus)
> + cpu_relax();
> + return 0;
>  }
>  
>  static ssize_t reload_store(struct device *dev,
> @@ -509,7 +575,6 @@ static ssize_t reload_store(struct device *dev,
>   const char *buf, size_t size)
>  {
>   enum ucode_state tmp_ret = UCODE_OK;
> - bool do_callback = false;
>   unsigned long val;
>   ssize_t ret = 0;
>   int cpu;
> @@