[PATCH 1/2] powerpc/e500: make load_up_spe a normal function

2012-02-27 Thread Olivia Yin
So that we can call it in the kernel.

Signed-off-by: Liu Yu yu@freescale.com
---
 arch/powerpc/kernel/head_fsl_booke.S |   23 ++-
 1 files changed, 6 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
index d5d78c4..c96e025 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -539,8 +539,10 @@ interrupt_base:
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
NORMAL_EXCEPTION_PROLOG
-   bne load_up_spe
-   addir3,r1,STACK_FRAME_OVERHEAD
+   beq 1f
+   bl  load_up_spe
+   b   fast_exception_return
+1: addir3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #else
EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
@@ -743,7 +745,7 @@ tlb_write_entry:
 /* Note that the SPE support is closely modeled after the AltiVec
  * support.  Changes to one are likely to be applicable to the
  * other!  */
-load_up_spe:
+_GLOBAL(load_up_spe)
 /*
  * Disable SPE for the task which had SPE previously,
  * and save its SPE registers in its thread_struct.
@@ -791,20 +793,7 @@ load_up_spe:
subir4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
 #endif /* !CONFIG_SMP */
-   /* restore registers and return */
-2: REST_4GPRS(3, r11)
-   lwz r10,_CCR(r11)
-   REST_GPR(1, r11)
-   mtcrr10
-   lwz r10,_LINK(r11)
-   mtlrr10
-   REST_GPR(10, r11)
-   mtspr   SPRN_SRR1,r9
-   mtspr   SPRN_SRR0,r12
-   REST_GPR(9, r11)
-   REST_GPR(12, r11)
-   lwz r11,GPR11(r11)
-   rfi
+   blr
 
 /*
  * SPE unavailable trap from kernel - print a message, but let
-- 
1.6.4


--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 2/2] KVM: booke: Improve SPE switch

2012-02-27 Thread Olivia Yin
Like book3s did for the FP switch,
instead of switching SPE state between host and guest,
this patch switches SPE state between qemu and the guest.
In this way, we can simulate a host load-up of SPE when loading guest SPE state,
and let the host decide when to give up SPE state.
Therefore it cooperates better with host SPE usage,
and so has some performance benefit on a UP host (lazy SPE).

Moreover, since the patch saves guest SPE state into the Linux thread field,
it creates the conditions to emulate guest SPE instructions in the host,
so that we can avoid injecting SPE exceptions into the guest.

The patch also turns all asm code into C code,
and adds SPE stat counters.

Signed-off-by: Liu Yu yu@freescale.com
---
 arch/powerpc/include/asm/kvm_host.h |   11 +-
 arch/powerpc/kernel/asm-offsets.c   |7 
 arch/powerpc/kvm/booke.c|   63 +++
 arch/powerpc/kvm/booke.h|8 +
 arch/powerpc/kvm/booke_interrupts.S |   37 
 arch/powerpc/kvm/e500.c |5 ---
 arch/powerpc/kvm/timing.c   |5 +++
 arch/powerpc/kvm/timing.h   |   11 ++
 8 files changed, 83 insertions(+), 64 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 1843d5d..6186d08 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -117,6 +117,11 @@ struct kvm_vcpu_stat {
u32 st;
u32 st_slow;
 #endif
+#ifdef CONFIG_SPE
+   u32 spe_unavail;
+   u32 spe_fp_data;
+   u32 spe_fp_round;
+#endif
 };
 
 enum kvm_exit_types {
@@ -147,6 +152,11 @@ enum kvm_exit_types {
FP_UNAVAIL,
DEBUG_EXITS,
TIMEINGUEST,
+#ifdef CONFIG_SPE
+   SPE_UNAVAIL,
+   SPE_FP_DATA,
+   SPE_FP_ROUND,
+#endif
__NUMBER_OF_KVM_EXIT_TYPES
 };
 
@@ -330,7 +340,6 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_SPE
ulong evr[32];
ulong spefscr;
-   ulong host_spefscr;
u64 acc;
 #endif
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index 8e0db0b..ff68f71 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -604,13 +604,6 @@ int main(void)
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
-#if defined(CONFIG_KVM)  defined(CONFIG_SPE)
-   DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
-   DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
-   DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
-   DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
-#endif
-
 #ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ee9e1ee..f20010b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -55,6 +55,11 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ dec,VCPU_STAT(dec_exits) },
{ ext_intr,   VCPU_STAT(ext_intr_exits) },
{ halt_wakeup, VCPU_STAT(halt_wakeup) },
+#ifdef CONFIG_SPE
+   { spe_unavail, VCPU_STAT(spe_unavail) },
+   { spe_fp_data, VCPU_STAT(spe_fp_data) },
+   { spe_fp_round, VCPU_STAT(spe_fp_round) },
+#endif
{ NULL }
 };
 
@@ -80,11 +85,11 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 }
 
 #ifdef CONFIG_SPE
-void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+static void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
preempt_disable();
-   enable_kernel_spe();
-   kvmppc_save_guest_spe(vcpu);
+   if (current-thread.regs-msr  MSR_SPE)
+   giveup_spe(current);
vcpu-arch.shadow_msr = ~MSR_SPE;
preempt_enable();
 }
@@ -92,8 +97,10 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
 {
preempt_disable();
-   enable_kernel_spe();
-   kvmppc_load_guest_spe(vcpu);
+   if (!(current-thread.regs-msr  MSR_SPE)) {
+   load_up_spe(NULL);
+   current-thread.regs-msr |= MSR_SPE;
+   }
vcpu-arch.shadow_msr |= MSR_SPE;
preempt_enable();
 }
@@ -104,7 +111,7 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
if (!(vcpu-arch.shadow_msr  MSR_SPE))
kvmppc_vcpu_enable_spe(vcpu);
} else if (vcpu-arch.shadow_msr  MSR_SPE) {
-   kvmppc_vcpu_disable_spe(vcpu);
+   vcpu-arch.shadow_msr = ~MSR_SPE;
}
 }
 #else
@@ -124,7 +131,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
vcpu-arch.shared-msr = new_msr;
 
kvmppc_mmu_msr_notify(vcpu, old_msr);
-   kvmppc_vcpu_sync_spe(vcpu);
+   if ((old_msr ^ new_msr)  MSR_SPE)
+   kvmppc_vcpu_sync_spe(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu 

Re: [PATCH 1/2] powerpc/e500: make load_up_spe a normal function

2012-02-27 Thread Tabi Timur-B04825
On Mon, Feb 27, 2012 at 4:59 AM, Olivia Yin hong-hua@freescale.com wrote:
 So that we can call it in kernel.

And why would we want that?

-- 
Timur Tabi
Linux kernel developer at Freescale
--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: [PATCH 24/37] KVM: PPC: booke: rework rescheduling checks

2012-02-27 Thread Bhushan Bharat-R65777


 -Original Message-
 From: kvm-ow...@vger.kernel.org [mailto:kvm-ow...@vger.kernel.org] On Behalf 
 Of
 Alexander Graf
 Sent: Friday, February 24, 2012 7:56 PM
 To: kvm-ppc@vger.kernel.org
 Cc: k...@vger.kernel.org; linuxppc-...@lists.ozlabs.org; Wood Scott-B07421
 Subject: [PATCH 24/37] KVM: PPC: booke: rework rescheduling checks
 
 Instead of checking whether we should reschedule only when we exited due to an
 interrupt, let's always check before entering the guest back again. This gets
 the target more in line with the other archs.
 
 Also while at it, generalize the whole thing so that eventually we could have 
 a
 single kvmppc_prepare_to_enter function for all ppc targets that does signal 
 and
 reschedule checking for us.
 
 Signed-off-by: Alexander Graf ag...@suse.de
 ---
  arch/powerpc/include/asm/kvm_ppc.h |2 +-
  arch/powerpc/kvm/book3s.c  |4 ++-
  arch/powerpc/kvm/booke.c   |   70 ---
  3 files changed, 52 insertions(+), 24 deletions(-)
 
 diff --git a/arch/powerpc/include/asm/kvm_ppc.h
 b/arch/powerpc/include/asm/kvm_ppc.h
 index e709975..7f0a3da 100644
 --- a/arch/powerpc/include/asm/kvm_ppc.h
 +++ b/arch/powerpc/include/asm/kvm_ppc.h
 @@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);  extern 
 void
 kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
 -extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 +extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
  extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);  extern void
 kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);  extern void
 kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); diff --git
 a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 7d54f4e..c8ead7b
 100644
 --- a/arch/powerpc/kvm/book3s.c
 +++ b/arch/powerpc/kvm/book3s.c
 @@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned
 int priority)
   return true;
  }
 
 -void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 +int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  {
   unsigned long *pending = vcpu-arch.pending_exceptions;
   unsigned long old_pending = vcpu-arch.pending_exceptions; @@ -283,6
 +283,8 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
   /* Tell the guest about our interrupt status */
   kvmppc_update_int_pending(vcpu, *pending, old_pending);
 +
 + return 0;
  }
 
  pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) diff --git
 a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 9979be1..3fcec2c
 100644
 --- a/arch/powerpc/kvm/booke.c
 +++ b/arch/powerpc/kvm/booke.c
 @@ -439,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu
 *vcpu)  }
 
  /* Check pending exceptions and deliver one, if possible. */ -void
 kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 +int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  {
 + int r = 0;
   WARN_ON_ONCE(!irqs_disabled());
 
   kvmppc_core_check_exceptions(vcpu);
 @@ -451,8 +452,44 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
   local_irq_disable();
 
   kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 - kvmppc_core_check_exceptions(vcpu);
 + r = 1;
   };
 +
 + return r;
 +}
 +
 +/*
 + * Common checks before entering the guest world.  Call with interrupts
 + * disabled.
 + *
 + * returns !0 if a signal is pending and check_signal is true  */
 +static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool
 +check_signal) {
 + int r = 0;
 +
 + WARN_ON_ONCE(!irqs_disabled());
 + while (true) {
 + if (need_resched()) {
 + local_irq_enable();
 + cond_resched();
 + local_irq_disable();
 + continue;
 + }
 +
 + if (kvmppc_core_prepare_to_enter(vcpu)) {

kvmppc_prepare_to_enter() is called even on heavyweight_exit. Should not this 
be called only on lightweight_exit?

Thanks
-Bharat

 + /* interrupts got enabled in between, so we
 +are back at square 1 */
 + continue;
 + }
 +
 + if (check_signal  signal_pending(current))
 + r = 1;
 +
 + break;
 + }
 +
 + return r;
  }
 
  int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) @@ 
 -470,10
 +507,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
   }
 
   local_irq_disable();
 -
 - kvmppc_core_prepare_to_enter(vcpu);
 -
 - if (signal_pending(current)) {
 + if (kvmppc_prepare_to_enter(vcpu, true)) {
   kvm_run-exit_reason = KVM_EXIT_INTR;
   ret = -EINTR;
   goto out;
 @@ -598,25 +632,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct
 

RE: [PATCH 24/37] KVM: PPC: booke: rework rescheduling checks

2012-02-27 Thread Bhushan Bharat-R65777


 -Original Message-
 From: Alexander Graf [mailto:ag...@suse.de]
 Sent: Monday, February 27, 2012 11:53 PM
 To: Bhushan Bharat-R65777
 Cc: kvm-ppc@vger.kernel.org; k...@vger.kernel.org; 
 linuxppc-...@lists.ozlabs.org;
 Wood Scott-B07421
 Subject: Re: [PATCH 24/37] KVM: PPC: booke: rework rescheduling checks
 
 On 02/27/2012 06:33 PM, Alexander Graf wrote:
  On 02/27/2012 05:34 PM, Bhushan Bharat-R65777 wrote:
 
  +}
  +
  +/*
  + * Common checks before entering the guest world.  Call with
  interrupts
  + * disabled.
  + *
  + * returns !0 if a signal is pending and check_signal is true  */
  +static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool
  +check_signal) {
  +int r = 0;
  +
  +WARN_ON_ONCE(!irqs_disabled());
  +while (true) {
  +if (need_resched()) {
  +local_irq_enable();
  +cond_resched();
  +local_irq_disable();
  +continue;
  +}
  +
  +if (kvmppc_core_prepare_to_enter(vcpu)) {
  kvmppc_prepare_to_enter() is called even on heavyweight_exit. Should
  not this be called only on lightweight_exit?
 
  Yeah, we don't need to call it when exiting anyways. That's a
  functional change though, which this patch is trying not to introduce.
  So we should rather do that as a patch on top.
 
 So how about this (warning! broken whitespace)?
 
 
 diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index
 7a16b56..616aa2d 100644
 --- a/arch/powerpc/kvm/booke.c
 +++ b/arch/powerpc/kvm/booke.c
 @@ -464,7 +464,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
*
* returns !0 if a signal is pending and check_signal is true
*/
 -static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool
 check_signal)
 +static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
   {
  int r = 0;
 
 @@ -483,7 +483,7 @@ static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu,
 bool check_signal)
  continue;
  }
 
 -   if (check_signal  signal_pending(current))
 +   if (signal_pending(current))
  r = 1;
 
  break;
 @@ -507,7 +507,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
 kvm_vcpu
 *vcpu)
  }
 
  local_irq_disable();
 -   if (kvmppc_prepare_to_enter(vcpu, true)) {
 +   if (kvmppc_prepare_to_enter(vcpu)) {
  kvm_run-exit_reason = KVM_EXIT_INTR;
  ret = -EINTR;
  goto out;
 @@ -941,13 +941,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct
 kvm_vcpu *vcpu,
   * To avoid clobbering exit_reason, only check for signals if we
   * aren't already exiting to userspace for some other reason.
   */
 -   local_irq_disable();
 -   if (kvmppc_prepare_to_enter(vcpu, !(r  RESUME_HOST))) {
 -   run-exit_reason = KVM_EXIT_INTR;
 -   r = (-EINTR  2) | RESUME_HOST | (r  RESUME_FLAG_NV);
 -   kvmppc_account_exit(vcpu, SIGNAL_EXITS);
 +   if (!(r  RESUME_HOST)) {
 +   local_irq_disable();
 +   if (kvmppc_prepare_to_enter(vcpu)) {
 +   run-exit_reason = KVM_EXIT_INTR;
 +   r = (-EINTR  2) | RESUME_HOST | (r 
 RESUME_FLAG_NV);
 +   kvmppc_account_exit(vcpu, SIGNAL_EXITS);
 +   }
  }
 
 +out:

Why?
Otherwise looks ok to me.

Thanks
-Bharat

  return r;
   }
 
 


--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 24/37] KVM: PPC: booke: rework rescheduling checks

2012-02-27 Thread Scott Wood
On 02/24/2012 08:26 AM, Alexander Graf wrote:
 -void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 +int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  {
   unsigned long *pending = vcpu-arch.pending_exceptions;
   unsigned long old_pending = vcpu-arch.pending_exceptions;
 @@ -283,6 +283,8 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  
   /* Tell the guest about our interrupt status */
   kvmppc_update_int_pending(vcpu, *pending, old_pending);
 +
 + return 0;
  }
  
  pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
 index 9979be1..3fcec2c 100644
 --- a/arch/powerpc/kvm/booke.c
 +++ b/arch/powerpc/kvm/booke.c
 @@ -439,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu 
 *vcpu)
  }
  
  /* Check pending exceptions and deliver one, if possible. */
 -void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 +int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  {
 + int r = 0;
   WARN_ON_ONCE(!irqs_disabled());
  
   kvmppc_core_check_exceptions(vcpu);
 @@ -451,8 +452,44 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
   local_irq_disable();
  
   kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 - kvmppc_core_check_exceptions(vcpu);
 + r = 1;
   };
 +
 + return r;
 +}
 +
 +/*
 + * Common checks before entering the guest world.  Call with interrupts
 + * disabled.
 + *
 + * returns !0 if a signal is pending and check_signal is true
 + */
 +static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool check_signal)
 +{
 + int r = 0;
 +
 + WARN_ON_ONCE(!irqs_disabled());
 + while (true) {
 + if (need_resched()) {
 + local_irq_enable();
 + cond_resched();
 + local_irq_disable();
 + continue;
 + }
 +
 + if (kvmppc_core_prepare_to_enter(vcpu)) {
 + /* interrupts got enabled in between, so we
 +are back at square 1 */
 + continue;
 + }
 +
 +
 + if (check_signal  signal_pending(current))
 + r = 1;

If there is a signal pending and MSR[WE] is set, we'll loop forever
without reaching this check.

-Scott

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v6 4/4] KVM: PPC: epapr: Update other hypercall invoking

2012-02-27 Thread Scott Wood
On 02/23/2012 03:22 AM, Liu Yu wrote:
 diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
 index 2dcdbc9..99ebdde 100644
 --- a/drivers/virt/Kconfig
 +++ b/drivers/virt/Kconfig
 @@ -15,6 +15,7 @@ if VIRT_DRIVERS
  config FSL_HV_MANAGER
   tristate Freescale hypervisor management driver
   depends on FSL_SOC
 + select EPAPR_PARAVIRT
   help
The Freescale hypervisor management driver provides several 
 services
 to drivers and applications related to the Freescale hypervisor:

What about the byte channel driver, and possibly others?

Grep for the hypercalls to make sure you got everything that uses this.

-Scott

--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


RE: [PATCH 1/2] powerpc/e500: make load_up_spe a normal function

2012-02-27 Thread Yin Olivia-R63875
Hi Scott,

This had been reviewed before and accepted by internal tree.
http://linux.freescale.net/patchwork/patch/11100/
http://git.am.freescale.net/gitolite/gitweb.cgi/sdk/kvm.git/commit/?h=for-sdk1.2id=c5088844dc665dbdae4fa51b8d58dc203bacc17e

I didn't change anything except the line.
I just commit to external kvm-ppc mailing list. Should I add my own 
Signed-off-by?

Best Regards,
Olivia

-Original Message-
From: Wood Scott-B07421 
Sent: Tuesday, February 28, 2012 3:19 AM
To: Yin Olivia-R63875
Cc: kvm-ppc@vger.kernel.org; k...@vger.kernel.org; 
linuxppc-...@lists.ozlabs.org; Liu Yu-B13201
Subject: Re: [PATCH 1/2] powerpc/e500: make load_up_spe a normal function

On 02/27/2012 04:59 AM, Olivia Yin wrote:
 So that we can call it in kernel.
 
 Signed-off-by: Liu Yu yu@freescale.com

Explain why we want this, and point out that this makes it similar to 
load_up_fpu.

 ---
  arch/powerpc/kernel/head_fsl_booke.S |   23 ++-
  1 files changed, 6 insertions(+), 17 deletions(-)

When posting a patch authored by someone else, more or less unchanged, you 
should put a From: line in the body of the e-mail.

git send-email will do this automatically if you preserve the authorship in the 
git commit.

Also, you should add your own Signed-off-by.

-Scott


Recall: [PATCH 1/2] powerpc/e500: make load_up_spe a normal function

2012-02-27 Thread Yin Olivia-R63875
Yin Olivia-R63875 would like to recall the message, [PATCH 1/2] powerpc/e500: 
make load_up_spe a normal function.
--
To unsubscribe from this list: send the line unsubscribe kvm-ppc in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html