Re: [PATCH] cpufreq: powernv: Add boost files to export ultra-turbo frequencies

2016-12-22 Thread Gautham R Shenoy
Hi Shilpa,

On Fri, Dec 16, 2016 at 04:43:08PM +0530, Shilpasri G Bhat wrote:
> In P8+, Workload Optimized Frequency(WOF) provides the capability to
> boost the cpu frequency based on the utilization of the other cpus
> running in the chip. The On-Chip-Controller(OCC) firmware will control
> the achievability of these frequencies depending on the power headroom
> available in the chip. Currently the ultra-turbo frequencies provided
> by this feature are exported along with the turbo and sub-turbo
> frequencies as scaling_available_frequencies. This patch will export
> the ultra-turbo frequencies separately as scaling_boost_frequencies in
> WOF enabled systems. This patch will add the boost sysfs file which
> can be used to disable/enable ultra-turbo frequencies.
> 
> Signed-off-by: Shilpasri G Bhat 
> ---
>  drivers/cpufreq/powernv-cpufreq.c | 48 
> ---
>  1 file changed, 45 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/cpufreq/powernv-cpufreq.c 
> b/drivers/cpufreq/powernv-cpufreq.c
> index 37671b5..56dfd91 100644
> --- a/drivers/cpufreq/powernv-cpufreq.c
> +++ b/drivers/cpufreq/powernv-cpufreq.c
> @@ -144,6 +144,7 @@ enum throttle_reason_type {
>   unsigned int max;
>   unsigned int nominal;
>   unsigned int nr_pstates;
> + bool wof_enabled;
>  } powernv_pstate_info;
> 
>  /* Use following macros for conversions between pstate_id and index */
> @@ -203,6 +204,7 @@ static int init_powernv_pstates(void)
>   const __be32 *pstate_ids, *pstate_freqs;
>   u32 len_ids, len_freqs;
>   u32 pstate_min, pstate_max, pstate_nominal;
> + u32 pstate_turbo, pstate_ultra_turbo;
> 
>   power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
>   if (!power_mgt) {
> @@ -225,6 +227,25 @@ static int init_powernv_pstates(void)
>   pr_warn("ibm,pstate-nominal not found\n");
>   return -ENODEV;
>   }
> +
> + if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
> +  &pstate_ultra_turbo)) {
> + powernv_pstate_info.wof_enabled = false;
> + goto next;
> + }
> +
> + if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
> +  &pstate_turbo)) {
> + powernv_pstate_info.wof_enabled = false;
> + goto next;
> + }
> +
> + if (pstate_turbo == pstate_ultra_turbo)
> + powernv_pstate_info.wof_enabled = false;
> + else
> + powernv_pstate_info.wof_enabled = true;
> +
> +next:
>   pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
>   pstate_nominal, pstate_max);

Could you also print if ultra_turbo is enabled ?

> 
> @@ -268,6 +289,13 @@ static int init_powernv_pstates(void)
>   powernv_pstate_info.nominal = i;
>   else if (id == pstate_min)
>   powernv_pstate_info.min = i;
> +
> + if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {

powernv_pstate_info.wof_enabled check is not required since we will
bail out of the loop below as j < powernv_pstate_info.max in case when
turbo = ultra-turbo or if ultra-turbo is not defined.

That said, it makes the code more readable so let us keep it.

> + int j;
> +
> + for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
> + powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
> + }
> 
>   /* End of list marker entry */
> @@ -305,9 +333,12 @@ static ssize_t cpuinfo_nominal_freq_show(struct 
> cpufreq_policy *policy,
>  struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
>   __ATTR_RO(cpuinfo_nominal_freq);
> 
> +#define SCALING_BOOST_FREQS_ATTR_INDEX   2
> +
>  static struct freq_attr *powernv_cpu_freq_attr[] = {
> + &cpufreq_freq_attr_scaling_available_freqs,
> + &cpufreq_freq_attr_cpuinfo_nominal_freq,
> + &cpufreq_freq_attr_scaling_boost_freqs,
>   NULL,
>  };
> 
> @@ -1013,11 +1044,22 @@ static int __init powernv_cpufreq_init(void)
>   register_reboot_notifier(&powernv_cpufreq_reboot_nb);
>   opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
> 
> + if (powernv_pstate_info.wof_enabled)
> + powernv_cpufreq_driver.boost_enabled = true;
> + else
> + powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
> +
>   rc = cpufreq_register_driver(&powernv_cpufreq_driver);
> - if (!rc)
> - return 0;
> + if (rc) {
> + pr_info("Failed to register the cpufreq driver (%d)\n", rc);
> + goto clean_notifiers;

cleanup_notifiers ?

> + }
> 
> - pr_info("Failed to register the cpufreq driver (%d)\n", rc);
> + if (powernv_pstate_info.wof_enabled)
> + cpufreq_enable_boost_support();
> +
> + return 0;
> +clean_notifiers:
>   unregister_all_notifiers();
>   clean_chip_info();
>  out:
> -- 
> 1.8.3.1
> 

Looks good 

Re: [PATCH RFC] powerpc/powernv: sysfs entry to force full IPL reboot

2016-12-22 Thread Stewart Smith
Andrew Donnellan  writes:
> skiboot now supports "fast reboot", a reboot procedure where skiboot
> reinitialises hardware and loads a new kernel without re-IPLing the
> machine. At present, fast reboot support is still experimental and is not
> enabled by default, however it is intended that it will be enabled by
> default in a near-future release.
>
> There may be some circumstances where the user wants to force a full IPL
> reboot rather than using fast reboot. Add support for the
> OPAL_REBOOT_FULL_IPL reboot type, enabled by writing 1 to
> /sys/firmware/opal/force_full_ipl_reboot. On versions of skiboot that
> implement the OPAL_REBOOT_FULL_IPL reboot type, this will force an IPL. On
> versions that do not, print an error message on reboot and proceed with a
> regular reboot (which could be a full IPL or a fast reboot).
>
> Cc: Stewart Smith 
> Cc: Benjamin Herrenschmidt 
> Signed-off-by: Andrew Donnellan 
>
> ---
>
> Corresponding skiboot patch: http://patchwork.ozlabs.org/patch/697601/

FWIW I've just merged the skiboot patch.

-- 
Stewart Smith
OPAL Architect, IBM.



Re: [PATCH RFC] powerpc/powernv: sysfs entry to force full IPL reboot

2016-12-22 Thread Stewart Smith
Michael Ellerman  writes:
> Andrew Donnellan  writes:
>
>> On 23/11/16 12:37, Andrew Donnellan wrote:
 There's existing logic in kernel/reboot.c to handle a reboot= command
 line parameter, which can set the reboot_mode, so my preference would be
 that we use that.

 Currently we completely ignore the reboot_mode, so there's no backward
 compatibility issue.

 It looks like the default is REBOOT_COLD, whatever that means. So I
 think we could define that on powernv REBOOT_HARD means "do a full IPL".
>>>
>>> Sounds good.
>>
>> Thinking about this more - you can't change that at runtime. :(
>
> Yeah. Is that a problem? From my POV this is basically a "my firmware is
> broken" workaround, so you're going to want to set it permanently until
> you update your firmware.

The only real world application I can see wanting to do that is perhaps
HTX - where it chooses what kind of bootme run it wants. So, well, for
certain definitions of "real world".

-- 
Stewart Smith
OPAL Architect, IBM.



[PATCH] powerpc: Fix build warning on 32-bit PPC - bisected to commit 989cea5c14be

2016-12-22 Thread Larry Finger
I am getting the following warning when I build kernel 4.9-git on my
PowerBook G4 with a 32-bit PPC processor:

  AS  arch/powerpc/kernel/misc_32.o
arch/powerpc/kernel/misc_32.S:299:7: warning: "CONFIG_FSL_BOOKE" is not defined 
[-Wundef]

This problem is evident after commit 989cea5c14be ("kbuild: prevent
lib-ksyms.o rebuilds"); however, this change in kbuild only exposes an
error that has been in the code since 2005 when this source file was
created. That was with commit 9994a33865f4 ("powerpc: Introduce
entry_{32,64}.S, misc_{32,64}.S, systbl.S"). The offending line is
"#elif CONFIG_FSL_BOOKE", which uses the macro in a way that does not
make a lot of sense. This error does not seem to cause any
errors in the executable, thus I am not recommending that it be applied
to any stable versions.

Thanks to Nicholas Piggin for suggesting this solution.

Fixes: 9994a33865f4 ("powerpc: Introduce entry_{32,64}.S, misc_{32,64}.S, 
systbl.S")
Signed-off-by: Larry Finger 
Cc: Nicholas Piggin 
Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: linuxppc-dev@lists.ozlabs.org
---
 arch/powerpc/kernel/misc_32.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 1863324..84db14e 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -296,7 +296,7 @@ _GLOBAL(flush_instruction_cache)
lis r3, KERNELBASE@h
iccci   0,r3
 #endif
-#elif CONFIG_FSL_BOOKE
+#elif defined(CONFIG_FSL_BOOKE)
 BEGIN_FTR_SECTION
mfspr   r3,SPRN_L1CSR0
ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
-- 
2.10.2



Re: [PATCH v3 13/15] livepatch: change to a per-task consistency model

2016-12-22 Thread Josh Poimboeuf
On Thu, Dec 22, 2016 at 03:34:52PM +0100, Petr Mladek wrote:
> On Wed 2016-12-21 15:25:05, Josh Poimboeuf wrote:
> > On Tue, Dec 20, 2016 at 06:32:46PM +0100, Petr Mladek wrote:
> > > On Thu 2016-12-08 12:08:38, Josh Poimboeuf wrote:
> > > > Change livepatch to use a basic per-task consistency model.  This is the
> > > > foundation which will eventually enable us to patch those ~10% of
> > > > security patches which change function or data semantics.  This is the
> > > > biggest remaining piece needed to make livepatch more generally useful.
> > > > 
> > > > [1] https://lkml.kernel.org/r/20141107140458.ga21...@suse.cz
> > > > 
> > > > --- /dev/null
> > > > +++ b/kernel/livepatch/transition.c
> > > > +/*
> > > > + * Initialize the global target patch state and all tasks to the 
> > > > initial patch
> > > > + * state, and initialize all function transition states to true in 
> > > > preparation
> > > > + * for patching or unpatching.
> > > > + */
> > > > +void klp_init_transition(struct klp_patch *patch, int state)
> > > > +{
> > > > +   struct task_struct *g, *task;
> > > > +   unsigned int cpu;
> > > > +   struct klp_object *obj;
> > > > +   struct klp_func *func;
> > > > +   int initial_state = !state;
> > > > +
> > > > +   WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
> > > > +
> > > > +   klp_transition_patch = patch;
> > > > +
> > > > +   /*
> > > > +* Set the global target patch state which tasks will switch 
> > > > to.  This
> > > > +* has no effect until the TIF_PATCH_PENDING flags get set 
> > > > later.
> > > > +*/
> > > > +   klp_target_state = state;
> > > > +
> > > > +   /*
> > > > +* If the patch can be applied or reverted immediately, skip the
> > > > +* per-task transitions.
> > > > +*/
> > > > +   if (patch->immediate)
> > > > +   return;
> > > > +
> > > > +   /*
> > > > +* Initialize all tasks to the initial patch state to prepare 
> > > > them for
> > > > +* switching to the target state.
> > > > +*/
> > > > +   read_lock(&tasklist_lock);
> > > > +   for_each_process_thread(g, task) {
> > > > +   WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
> > > > +   task->patch_state = initial_state;
> > > > +   }
> > > > +   read_unlock(&tasklist_lock);
> > > > +
> > > > +   /*
> > > > +* Ditto for the idle "swapper" tasks.
> > > > +*/
> > > > +   get_online_cpus();
> > > > +   for_each_online_cpu(cpu) {
> > > > +   task = idle_task(cpu);
> > > > +   WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
> > > > +   task->patch_state = initial_state;
> > > > +   }
> > > > +   put_online_cpus();
> > > 
> > > We allow to add/remove CPUs here. I am afraid that we will also need
> > > to add a cpu coming/going handler that will set the task->patch_state
> > > the right way. We must not set the klp_target_state until all ftrace
> > > handlers are ready.
> > 
> > What if we instead just change the above to use for_each_possible_cpu()?
> > We could do the same in klp_complete_transition().
> 
> I like this idea. It seems that there is idle task for each possible
> cpu, see idle_threads_init().
> 
> IMHO, we should do the same everytime we do anything with the idle
> tasks. I mean in klp_start_transition, klp_try_complete_transition()
> and also complete_transition().
> 
> Then they will be handled like any other processes and we do not need
> to think of any special races.

More on this below.

> > > > +   /*
> > > > +* Enforce the order of the task->patch_state initializations 
> > > > and the
> > > > +* func->transition updates to ensure that, in the enable path,
> > > > +* klp_ftrace_handler() doesn't see a func in transition with a
> > > > +* task->patch_state of KLP_UNDEFINED.
> > > > +*/
> > > > +   smp_wmb();
> > > > +
> > > > +   /*
> > > > +* Set the func transition states so klp_ftrace_handler() will 
> > > > know to
> > > > +* switch to the transition logic.
> > > > +*
> > > > +* When patching, the funcs aren't yet in the func_stack and 
> > > > will be
> > > > +* made visible to the ftrace handler shortly by the calls to
> > > > +* klp_patch_object().
> > > > +*
> > > > +* When unpatching, the funcs are already in the func_stack and 
> > > > so are
> > > > +* already visible to the ftrace handler.
> > > > +*/
> > > > +   klp_for_each_object(patch, obj)
> > > > +   klp_for_each_func(obj, func)
> > > > +   func->transition = true;
> > > > +}
> > > > +
> > > > +/*
> > > > + * Start the transition to the specified target patch state so tasks 
> > > > can begin
> > > > + * switching to it.
> > > > + */
> > > > +void klp_start_transition(void)
> > > > +{
> > > > +   struct task_struct *g, *task;
> > > > +  

Re: Build warning on 32-bit PPC - bisected to commit 989cea5c14be

2016-12-22 Thread Larry Finger

On 12/21/2016 08:02 PM, Nicholas Piggin wrote:

Hi Larry,

This is strange you've bisected it there, I can't see how that patch would
trigger it. That said, powerpc has had a few small build system glitches.

It looks like this warning could be fixed by changing #elif CONFIG_FSL_BOOKE
to #elif defined (CONFIG_FSL_BOOKE). Want to send a patch (if it works)?


Nick,

I agree that the "bad" commit does not look likely to be the problem, but I 
verified it by checking out the previous commit, which does not show the 
problem. I'm quite sure that PowerPC has a lot of quirks and glitches beyond 
those caused by the Apple hardware.


I have tested the change and I will submit the patch.

Thanks,

Larry




Re: [PATCH v3 13/15] livepatch: change to a per-task consistency model

2016-12-22 Thread Petr Mladek
On Wed 2016-12-21 15:25:05, Josh Poimboeuf wrote:
> On Tue, Dec 20, 2016 at 06:32:46PM +0100, Petr Mladek wrote:
> > On Thu 2016-12-08 12:08:38, Josh Poimboeuf wrote:
> > > Change livepatch to use a basic per-task consistency model.  This is the
> > > foundation which will eventually enable us to patch those ~10% of
> > > security patches which change function or data semantics.  This is the
> > > biggest remaining piece needed to make livepatch more generally useful.
> > > 
> > > [1] https://lkml.kernel.org/r/20141107140458.ga21...@suse.cz
> > > 
> > > --- /dev/null
> > > +++ b/kernel/livepatch/transition.c
> > > +/*
> > > + * Initialize the global target patch state and all tasks to the initial 
> > > patch
> > > + * state, and initialize all function transition states to true in 
> > > preparation
> > > + * for patching or unpatching.
> > > + */
> > > +void klp_init_transition(struct klp_patch *patch, int state)
> > > +{
> > > + struct task_struct *g, *task;
> > > + unsigned int cpu;
> > > + struct klp_object *obj;
> > > + struct klp_func *func;
> > > + int initial_state = !state;
> > > +
> > > + WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
> > > +
> > > + klp_transition_patch = patch;
> > > +
> > > + /*
> > > +  * Set the global target patch state which tasks will switch to.  This
> > > +  * has no effect until the TIF_PATCH_PENDING flags get set later.
> > > +  */
> > > + klp_target_state = state;
> > > +
> > > + /*
> > > +  * If the patch can be applied or reverted immediately, skip the
> > > +  * per-task transitions.
> > > +  */
> > > + if (patch->immediate)
> > > + return;
> > > +
> > > + /*
> > > +  * Initialize all tasks to the initial patch state to prepare them for
> > > +  * switching to the target state.
> > > +  */
> > > + read_lock(&tasklist_lock);
> > > + for_each_process_thread(g, task) {
> > > + WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
> > > + task->patch_state = initial_state;
> > > + }
> > > + read_unlock(&tasklist_lock);
> > > +
> > > + /*
> > > +  * Ditto for the idle "swapper" tasks.
> > > +  */
> > > + get_online_cpus();
> > > + for_each_online_cpu(cpu) {
> > > + task = idle_task(cpu);
> > > + WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
> > > + task->patch_state = initial_state;
> > > + }
> > > + put_online_cpus();
> > 
> > We allow to add/remove CPUs here. I am afraid that we will also need
> > to add a cpu coming/going handler that will set the task->patch_state
> > the right way. We must not set the klp_target_state until all ftrace
> > handlers are ready.
> 
> What if we instead just change the above to use for_each_possible_cpu()?
> We could do the same in klp_complete_transition().

I like this idea. It seems that there is idle task for each possible
cpu, see idle_threads_init().

IMHO, we should do the same everytime we do anything with the idle
tasks. I mean in klp_start_transition, klp_try_complete_transition()
and also complete_transition().

Then they will be handled like any other processes and we do not need
to think of any special races.


> > > + /*
> > > +  * Enforce the order of the task->patch_state initializations and the
> > > +  * func->transition updates to ensure that, in the enable path,
> > > +  * klp_ftrace_handler() doesn't see a func in transition with a
> > > +  * task->patch_state of KLP_UNDEFINED.
> > > +  */
> > > + smp_wmb();
> > > +
> > > + /*
> > > +  * Set the func transition states so klp_ftrace_handler() will know to
> > > +  * switch to the transition logic.
> > > +  *
> > > +  * When patching, the funcs aren't yet in the func_stack and will be
> > > +  * made visible to the ftrace handler shortly by the calls to
> > > +  * klp_patch_object().
> > > +  *
> > > +  * When unpatching, the funcs are already in the func_stack and so are
> > > +  * already visible to the ftrace handler.
> > > +  */
> > > + klp_for_each_object(patch, obj)
> > > + klp_for_each_func(obj, func)
> > > + func->transition = true;
> > > +}
> > > +
> > > +/*
> > > + * Start the transition to the specified target patch state so tasks can 
> > > begin
> > > + * switching to it.
> > > + */
> > > +void klp_start_transition(void)
> > > +{
> > > + struct task_struct *g, *task;
> > > + unsigned int cpu;
> > > +
> > > + WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
> > > +
> > > + pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
> > > +   klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
> > > +
> > > + /*
> > > +  * If the patch can be applied or reverted immediately, skip the
> > > +  * per-task transitions.
> > > +  */
> > > + if (klp_transition_patch->immediate)
> > > + return;
> > > +
> > > + /*
> > > +  * Mark all normal tasks as needing a patch state update.  As they pass
> > > +  * through the syscall barrier they'll switch over to the target state
> > > +  * (unless we switch them in klp_try_complete_transition() first).
> > > +  */
> > > + read_lock(&tasklist_lock);

[PATCH] powerpc/64: quieten section mismatch warnings

2016-12-22 Thread Nicholas Piggin
Some of the boot code located at the start of kernel text is "init"
class, in that it only runs at boot time, however marking it as normal
init code is problematic because that puts it into a different section
located at the very end of kernel text.

E.g., in case the TOC is not set up, we may not be able to tolerate a
branch trampoline to reach the init function.

Create a whitelist function prefix that prevents the init section
reference warnings for these cases.

Signed-off-by: Nicholas Piggin 
---

I've had enough of seeing these warnings and having my build break
when I forget to turn the non-fatal option on! Can we just hit them
on the head with a hammer?

Thanks,
Nick

 arch/powerpc/kernel/head_64.S | 14 ++
 scripts/mod/modpost.c | 18 --
 2 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1dc5eae2ced3..6be7b9f55278 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -478,7 +478,6 @@ __mmu_off:
  *   
  *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
  * DT block, r4 is a physical pointer to the kernel itself
- *
  */
 __start_initialization_multiplatform:
/* Make sure we are running in 64 bits mode */
@@ -498,7 +497,7 @@ __start_initialization_multiplatform:
 */
cmpldi  cr0,r5,0
beq 1f
-   b   __boot_from_prom/* yes -> prom */
+   b   boot_initref__boot_from_prom/* yes -> prom */
 1:
/* Save parameters */
mr  r31,r3
@@ -532,7 +531,7 @@ __start_initialization_multiplatform:
b   __after_prom_start
 #endif /* CONFIG_PPC_BOOK3E */
 
-__boot_from_prom:
+boot_initref__boot_from_prom:
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
/* Save parameters */
mr  r31,r3
@@ -662,7 +661,7 @@ p_end: .llong _end - copy_to_here
add r5,r5,r8
 5: bl  copy_and_flush  /* copy the rest */
 
-9: b   start_here_multiplatform
+9: b   boot_initref__start_here_multiplatform
 
 /*
  * Copy routine used to copy the kernel to start at physical address 0
@@ -897,7 +896,7 @@ p_toc:  .llong  __toc_start + 0x8000 - 0b
 /*
  * This is where the main kernel code starts.
  */
-start_here_multiplatform:
+boot_initref__start_here_multiplatform:
/* set up the TOC */
bl  relative_toc
tovirt(r2,r2)
@@ -962,7 +961,7 @@ start_here_multiplatform:
mr  r3,r31
bl  early_setup /* also sets r13 and SPRG_PACA */
 
-   LOAD_REG_ADDR(r3, start_here_common)
+   LOAD_REG_ADDR(r3, boot_initref__start_here_common)
ld  r4,PACAKMSR(r13)
mtspr   SPRN_SRR0,r3
mtspr   SPRN_SRR1,r4
@@ -970,8 +969,7 @@ start_here_multiplatform:
b   .   /* prevent speculative execution */
 
/* This is where all platforms converge execution */
-
-start_here_common:
+boot_initref__start_here_common:
/* relocation is on at this point */
std r1,PACAKSAVE(r13)
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d..548a160b4d24 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1162,8 +1162,16 @@ static const struct sectioncheck *section_mismatch(
  *   fromsec = text section
  *   refsymname = *.constprop.*
  *
+ * Pattern 6:
+ *   powerpc64 has early boot functions that reference init, but must
+ *   remain close to architectural boot entry address.
+ *   This pattern is identified by
+ *   tosec   = init section
+ *   fromsym = boot_initref__*
+ *
  **/
-static int secref_whitelist(const struct sectioncheck *mismatch,
+static int secref_whitelist(const struct elf_info *elf,
+   const struct sectioncheck *mismatch,
const char *fromsec, const char *fromsym,
const char *tosec, const char *tosym)
 {
@@ -1200,6 +1208,12 @@ static int secref_whitelist(const struct sectioncheck 
*mismatch,
match(fromsym, optim_symbols))
return 0;
 
+   /* Check for pattern 6 */
+   if (elf->hdr->e_machine == EM_PPC64)
+   if (match(tosec, init_sections) &&
+   !strncmp(fromsym, "boot_initref__", 
strlen("boot_initref__")))
+   return 0;
+
return 1;
 }
 
@@ -1540,7 +1554,7 @@ static void default_mismatch_handler(const char *modname, 
struct elf_info *elf,
tosym = sym_name(elf, to);
 
/* check whitelist - we may ignore it */
-   if (secref_whitelist(mismatch,
+   if (secref_whitelist(elf, mismatch,
 fromsec, fromsym, tosec, tosym)) {
report_sec_mismatch(modname, mismatch,
fromsec, r->r_offset, fromsym,
-- 
2.11.0



[PATCH] arch: powerpc: ppc4xx compile flag optimizations

2016-12-22 Thread John Crispin
From: Imre Kaloz 

This patch splits up the compile flags between ppc40x and ppc44x.

Signed-off-by: John Crispin 
Signed-off-by: Imre Kaloz 
---
 arch/powerpc/Makefile  |3 ++-
 arch/powerpc/boot/Makefile |6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 617dece..d85bb82 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -217,7 +217,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 KBUILD_CFLAGS  += -mno-sched-epilog
 endif
 
-cpu-as-$(CONFIG_4xx)   += -Wa,-m405
+cpu-as-$(CONFIG_40x)   += -Wa,-m405
+cpu-as-$(CONFIG_44x)   += -Wa,-m440
 cpu-as-$(CONFIG_ALTIVEC)   += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)  += -Wa,-me200
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9d47f2e..5c2feaa 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -53,10 +53,10 @@ BOOTCFLAGS  += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
 DTC_FLAGS  ?= -p 1024
 
 $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
 $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
-- 
1.7.10.4