[COMMIT] [virtio] Revert published indexes.

2010-10-01 Thread Vadim Rozenfeld
repository: c:/kernel.org/kvm-guest-drivers-windows
branch: master
commit 0f7574bdc663aba3ae7f2a8addb2b3a4f34fdd76
Author: Vadim Rozenfeld vroze...@redhat.com
Date:   Fri Oct 1 13:04:00 2010 +0200

[virtio] From: Yan Vugenfirer yvuge...@redhat.com Revert published
indexes

diff --git a/VirtIO/VirtIO.h b/VirtIO/VirtIO.h
index e98088b..6125a8f 100644
--- a/VirtIO/VirtIO.h
+++ b/VirtIO/VirtIO.h
@@ -13,9 +13,6 @@
 /* We've given up on this device. */
 #define VIRTIO_CONFIG_S_FAILED 0x80
 
-/* virtio library features bits */
-#define VIRTIO_F_INDIRECT  28
-#define VIRTIO_F_PUBLISH_INDICES   29
 /**
  * virtqueue - a queue to register buffers for sending or receiving.
  * @callback: the function to call when buffers are consumed (can be
NULL).
diff --git a/VirtIO/VirtIO.sln b/VirtIO/VirtIO.sln
index a9224ed..b50bb8a 100644
--- a/VirtIO/VirtIO.sln
+++ b/VirtIO/VirtIO.sln
@@ -1,6 +1,6 @@
 
-Microsoft Visual Studio Solution File, Format Version 9.00
-# Visual Studio 2005
+Microsoft Visual Studio Solution File, Format Version 10.00
+# Visual Studio 2008
 Project({8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}) = VirtIO,
VirtIO.vcproj, {15DC9616-56BC-474A-90C2-38DBAF64BA06}
 EndProject
 Global
diff --git a/VirtIO/VirtIO.vcproj b/VirtIO/VirtIO.vcproj
index e4f7760..7cd5731 100644
--- a/VirtIO/VirtIO.vcproj
+++ b/VirtIO/VirtIO.vcproj
@@ -1,11 +1,12 @@
 ?xml version=1.0 encoding=Windows-1252?
 VisualStudioProject
ProjectType=Visual C++
-   Version=8.00
+   Version=9.00
Name=VirtIO
ProjectGUID={15DC9616-56BC-474A-90C2-38DBAF64BA06}
RootNamespace=VirtIO
Keyword=MakeFileProj
+   TargetFrameworkVersion=131072

Platforms
Platform
diff --git a/VirtIO/VirtIOPCI.c b/VirtIO/VirtIOPCI.c
index 6d18023..5708de9 100644
--- a/VirtIO/VirtIOPCI.c
+++ b/VirtIO/VirtIOPCI.c
@@ -97,12 +97,6 @@ bool VirtIODeviceEnableGuestFeature(VirtIODevice *
pVirtIODevice, unsigned uFeat
return !!(ulValue  (1  uFeature));
 }
 
-bool VirtIODeviceHasFeature(unsigned uFeature)
-{
-   if (uFeature == VIRTIO_F_PUBLISH_INDICES) return TRUE;
-   return FALSE;
-}
-
 
/
 //
 // Reset device
@@ -206,7 +200,7 @@ static void vp_notify(struct virtqueue *vq)
 ULONG VirtIODeviceISR(VirtIODevice * pVirtIODevice)
 {
ULONG status;
-   DPrintf(6, (%s\n, __FUNCTION__));
+   DPrintf(4, (%s\n, __FUNCTION__));
 
status = ReadVirtIODeviceByte(pVirtIODevice-addr +
VIRTIO_PCI_ISR);
 
@@ -334,3 +328,8 @@ u32 VirtIODeviceGetQueueSize(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vq-priv;
return info-num;
 }
+
+void* VirtIODeviceDetachUnusedBuf(struct virtqueue *vq)
+{
+return vring_detach_unused_buf(vq);
+}
diff --git a/VirtIO/VirtIORing.c b/VirtIO/VirtIORing.c
index 585ada4..207af87 100644
--- a/VirtIO/VirtIORing.c
+++ b/VirtIO/VirtIORing.c
@@ -44,6 +44,9 @@ struct _declspec(align(PAGE_SIZE)) vring_virtqueue
/* Number we've added since last sync. */
unsigned int num_added;
 
+   /* Last used index we've seen. */
+   u16 last_used_idx;
+
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);
 
@@ -231,27 +234,25 @@ static void vring_shutdown(struct virtqueue *_vq)
 
 static bool more_used(const struct vring_virtqueue *vq)
 {
-return vring_last_used(vq-vring) != vq-vring.used-idx;
+   return vq-last_used_idx != vq-vring.used-idx;
 }
 
 static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 {
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
-struct vring_used_elem *u;
unsigned int i;
 
if (!more_used(vq)) {
-   DPrintf(4, (No more buffers in queue: last_used_idx %d
vring.used-idx %d\n, 
-   vring_last_used(vq-vring),
-   vq-vring.used-idx));
+   DPrintf(4, (No more buffers in queue: last_used_idx %d
vring.used-idx %d\n, vq-last_used_idx, vq-vring.used-idx));
return NULL;
}
 
-   u = vq-vring.used-ring[vring_last_used(vq-vring) %
vq-vring.num];
-   i = u-id;
-   *len = u-len;
+   /* Only get used array entries after they have been exposed by
host. */
+   rmb();
 
+   i = vq-vring.used-ring[vq-last_used_idx%vq-vring.num].id;
+   *len = vq-vring.used-ring[vq-last_used_idx%
vq-vring.num].len;
 
DPrintf(4, (%s id %d, len %d\n, __FUNCTION__, i, *len) );
 
@@ -267,7 +268,7 @@ static void *vring_get_buf(struct virtqueue *_vq,
unsigned int *len)
/* detach_buf clears data, so grab it now. */
ret = vq-data[i];
detach_buf(vq, i);
-vring_last_used(vq-vring)++;
+   vq-last_used_idx++;
return ret;
 }
 
@@ -316,7 +317,7 @@ void initialize_virtqueue(struct vring_virtqueue
*vq,
vq-vq.vq_ops = vring_vq_ops;
vq-notify = 

[COMMIT] [NetKVM] Revert published indexes.

2010-10-01 Thread Vadim Rozenfeld
repository: c:/kernel.org/kvm-guest-drivers-windows
branch: master
commit 10660d8c613b1615d0193a0e983985bcb9431149
Author: Vadim Rozenfeld vroze...@redhat.com
Date:   Fri Oct 1 13:13:45 2010 +0200

[NetKVM] From: Yan Vugenfirer yvuge...@redhat.com  Revert
published indexes.

diff --git a/NetKVM/Common/ParaNdis-Common.c
b/NetKVM/Common/ParaNdis-Common.c
index a1c8a74..a775c84 100644
--- a/NetKVM/Common/ParaNdis-Common.c
+++ b/NetKVM/Common/ParaNdis-Common.c
@@ -110,7 +110,6 @@ typedef struct _tagConfigurationEntries
tConfigurationEntry PriorityVlanTagging;
tConfigurationEntry VlanId;
tConfigurationEntry UseMergeableBuffers;
-   tConfigurationEntry PublishIndices;
tConfigurationEntry MTU;
 }tConfigurationEntries;
 
@@ -149,7 +148,6 @@ static const tConfigurationEntries
defaultConfiguration =
{ *PriorityVLANTag, 3, 0, 3},
{ VlanId, 0, 0, 4095},
{ MergeableBuf, 1, 0, 1},
-   { PublishIndices, 1, 0, 1},
{ MTU, 1500, 500, 65500},
 };
 
@@ -279,7 +277,6 @@ static void ReadNicConfiguration(PARANDIS_ADAPTER
*pContext, PUCHAR *ppNewMACAdd
GetConfigurationEntry(cfg,
pConfiguration-PriorityVlanTagging);
GetConfigurationEntry(cfg,
pConfiguration-VlanId);
GetConfigurationEntry(cfg,
pConfiguration-UseMergeableBuffers);
-   GetConfigurationEntry(cfg,
pConfiguration-PublishIndices);
GetConfigurationEntry(cfg,
pConfiguration-MTU);
 
#if !defined(WPP_EVENT_TRACING)
@@ -321,7 +318,6 @@ static void ReadNicConfiguration(PARANDIS_ADAPTER
*pContext, PUCHAR *ppNewMACAdd
pContext-ulPriorityVlanSetting =
pConfiguration-PriorityVlanTagging.ulValue;
pContext-VlanId =
pConfiguration-VlanId.ulValue;
pContext-bUseMergedBuffers =
pConfiguration-UseMergeableBuffers.ulValue != 0;
-   pContext-bDoPublishIndices =
pConfiguration-PublishIndices.ulValue != 0;
pContext-MaxPacketSize.nMaxDataSize =
pConfiguration-MTU.ulValue;
if (!pContext-bDoSupportPriority)
pContext-ulPriorityVlanSetting = 0;
@@ -436,8 +432,6 @@ static void DumpVirtIOFeatures(VirtIODevice *pIO)
{VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_UFO},
{VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MRG_RXBUF},
{VIRTIO_NET_F_STATUS, VIRTIO_NET_F_STATUS},
-   {VIRTIO_F_INDIRECT, VIRTIO_F_INDIRECT},
-   {VIRTIO_F_PUBLISH_INDICES, VIRTIO_F_PUBLISH_INDICES},
};
UINT i;
for (i = 0; i  sizeof(Features)/sizeof(Features[0]); ++i)
@@ -478,7 +472,7 @@ static void PrintStatistics(PARANDIS_ADAPTER
*pContext)
pContext-nofFreeHardwareBuffers,
pContext-minFreeHardwareBuffers));
pContext-minFreeHardwareBuffers =
pContext-nofFreeHardwareBuffers;
DPrintf(0, ([Diag!] TX packets to return %d,
pContext-NetTxPacketsToReturn));
-   DPrintf(0, ([Diag!] Bytes transmitted %I64u, received %I64u,
interrupts %d, pContext-Statistics.ifHCOutOctets,
pContext-Statistics.ifHCInOctets, pContext-ulIrqReceived));
+   DPrintf(0, ([Diag!] Bytes transmitted %I64u, received %I64u,
pContext-Statistics.ifHCOutOctets, pContext-Statistics.ifHCInOctets));
 }
 
 
@@ -620,16 +614,6 @@ NDIS_STATUS ParaNdis_InitializeContext(
pContext-CurrentMacAddress[4],
pContext-CurrentMacAddress[5]));
}
-   if (pContext-bDoPublishIndices)
-   pContext-bDoPublishIndices =
VirtIODeviceGetHostFeature(pContext-IODevice,
VIRTIO_F_PUBLISH_INDICES) != 0;
-   if (pContext-bDoPublishIndices 
VirtIODeviceHasFeature(VIRTIO_F_PUBLISH_INDICES))
-   {
-
VirtIODeviceEnableGuestFeature(pContext-IODevice,
VIRTIO_F_PUBLISH_INDICES);
-   }
-   else
-   {
-   pContext-bDoPublishIndices = FALSE;
-   }
}
else
{
@@ -1723,7 +1707,7 @@ static UINT
ParaNdis_ProcessRxPath(PARANDIS_ADAPTER *pContext)
NdisReleaseSpinLock(pContext-ReceiveLock);
if (nReceived  pBatchOfPackets)
{
-   DPrintf(1, ([%s] received %d buffers, __FUNCTION__,
nReceived));
+   DPrintf(1, ([%s]%d: received %d buffers, __FUNCTION__,
KeGetCurrentProcessorNumber(), nReceived));
ParaNdis_IndicateReceivedBatch(pContext,
pBatchOfPackets, nReceived);
}
if (pBatchOfPackets) NdisFreeMemory(pBatchOfPackets, 0, 0);
@@ -1790,19 +1774,29 @@ ULONG ParaNdis_DPCWorkBody(PARANDIS_ADAPTER
*pContext)
int nRestartResult = 2, nLoop = 0;
while (nRestartResult)
{
-   UINT n =
ParaNdis_ProcessRxPath(pContext);

[COMMIT] [NetKVM] fix build and signing scripts

2010-10-01 Thread Vadim Rozenfeld
repository: c:/kernel.org/kvm-guest-drivers-windows
branch: master
commit 809713e25bc6beb40850661df9039da804915545
Author: Vadim Rozenfeld vroze...@redhat.com
Date:   Fri Oct 1 13:16:50 2010 +0200

[NetKVM] From: Yan Vugenfirer yvuge...@redhat.com fix build and
signing scripts

diff --git a/NetKVM/buildAll.bat b/NetKVM/buildAll.bat
index 9379f4a..8e2ad7b 100644
--- a/NetKVM/buildAll.bat
+++ b/NetKVM/buildAll.bat
@@ -20,6 +20,8 @@ set _MAJORVERSION_=209
 set _MINORVERSION_=605
 set _DRIVER_ISO_NAME=Install-%_MINORVERSION_%%_MAJORVERSION_%.iso
 
+set OLD_PATH=%PATH%
+
 if not %1== goto parameters_here
 echo no parameters specified, rebuild all
 call clean.bat
@@ -46,7 +48,7 @@ goto nextparam
 : (NDIS is different from general-purpose kernel)
 :
 :copyVirtIO
-for %%f in (..\VirtIO\*.h ..\VirtIO\*.c) do copy %%f VirtIO /Y
+for %%f in (..\VirtIO\VirtIO*.h ..\VirtIO\VirtIO*.c ..\VirtIO
\PVUtils.c ..\VirtIO\PVUtils.h ..\VirtIO\PVUtils.h) do copy %%f
VirtIO /Y
 goto :eof
 
 
@@ -59,7 +61,6 @@ set _VERSION_=%_NT_TARGET_MAJ%.%_NT_TARGET_MIN%.%
_MAJORVERSION_%.%_MINORVERSION_
 echo version set: %_VERSION_%
 goto :eof
 
-
 :Win7 
 set DDKBUILDENV=
 pushd %BUILDROOT%
@@ -158,3 +159,5 @@ goto continue
 :echo Packing to ISO image
 :call tools\makecdimage.cmd %_DRIVER_ISO_NAME% Install
 goto :eof
+echo setting old path back
+set PATH=%OLD_PATH%
\ No newline at end of file
diff --git a/NetKVM/tools/signing.cmd b/NetKVM/tools/signing.cmd
index 1daf47e..f4c0a39 100644
--- a/NetKVM/tools/signing.cmd
+++ b/NetKVM/tools/signing.cmd
@@ -30,16 +30,16 @@ goto :eof
 :signVista
 shift
 if /i %1==x86 set _OSMASK_=Vista_X86,Server2008_X86,7_X86
-if /i %1==amd64 set _OSMASK_=Vista_X64,Server2008_X64,7_X64
-if /i %1==x64 set _OSMASK_=Vista_X64,Server2008_X64,7_X64
+if /i %1==amd64 set
_OSMASK_=Vista_X64,Server2008_X64,7_X64,Server2008R2_X64
+if /i %1==x64 set
_OSMASK_=Vista_X64,Server2008_X64,7_X64,Server2008R2_X64
 call :dosign %1 %2 %3 
 goto :eof
 
 :signWin7
 shift
 if /i %1==x86 set _OSMASK_=Vista_X86,Server2008_X86,7_X86
-if /i %1==amd64 set _OSMASK_=Vista_X64,Server2008_X64,7_X64
-if /i %1==x64 set _OSMASK_=Vista_X64,Server2008_X64,7_X64
+if /i %1==amd64 set
_OSMASK_=Vista_X64,Server2008_X64,7_X64,Server2008R2_X64
+if /i %1==x64 set
_OSMASK_=Vista_X64,Server2008_X64,7_X64,Server2008R2_X64
 call :dosign %1 %2 %3 
 goto :eof
 

--
To unsubscribe from this list: send the line unsubscribe kvm-commits in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[COMMIT] [WIN-GUEST-DRIVERS] fix build and cleanup scripts

2010-10-01 Thread Vadim Rozenfeld
repository: c:/kernel.org/kvm-guest-drivers-windows
branch: master
commit 11ad3ec4be2fcf47d3b63c8b89cd7ad688a83a33
Author: Vadim Rozenfeld vroze...@redhat.com
Date:   Fri Oct 1 13:18:40 2010 +0200

[WIN-GUEST-DRIVERS] fix build and cleanup scripts

diff --git a/buildAll.bat b/buildAll.bat
index bd91e7a..ed20391 100644
--- a/buildAll.bat
+++ b/buildAll.bat
@@ -10,7 +10,11 @@ cd viostor
 call buildall.bat
 cd ..
 
-cd Balloon\BalloonWDF
+cd Balloon
 call buildall.bat
-cd ..\..
+cd ..
+
+cd vioserial
+call buildall.bat
+cd ..
 
diff --git a/clean.bat b/clean.bat
index c74d6fa..50d586c 100644
--- a/clean.bat
+++ b/clean.bat
@@ -10,6 +10,11 @@ cd viostor
 call clean.bat
 cd ..
 
-cd Balloon\BalloonWDF
-call clean.bat
-cd ..\..
\ No newline at end of file
+cd Balloon
+call cleanall.bat
+cd ..
+
+
+cd vioserial
+call cleanall.bat
+cd ..
\ No newline at end of file

--
To unsubscribe from this list: send the line unsubscribe kvm-commits in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: how to debug unhandled vm exit: 0x11?

2010-10-01 Thread Neo Jia
On Wed, Sep 29, 2010 at 1:38 AM, Avi Kivity a...@redhat.com wrote:
  On 09/28/2010 08:40 PM, Neo Jia wrote:

 I found the instruction that caused this problem:

 emulation failed (failure) rip 71f14651 66 0f 7f 07

 And according to Intel, this is a MOVDQA. So, do we already have this
 instruction emulated as I am using a pretty old version of KVM
 (release 88)? If yes, could you point me to the file I need to look at
 for that specific patch?


 movdqa is not emulated.

I am going to give a try to emulate this instruction. BTW, do we have
any unit test for those emulated x86 instruction sets?

Thanks,
Neo


 --
 I have a truly marvellous patch that fixes the bug which this
 signature is too narrow to contain.





-- 
I would remember that if researchers were not ambitious
probably today we haven't the technology we are using!
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


cpulimit and kvm process

2010-10-01 Thread Mihamina Rakotomandimby
Manao ahoana, Hello, Bonjour,

I would like to launch several KVM guests on a multicore CPU.
The number of the KVM process is over the number of physical cores.

I would like to limit each KVM process to say... 10% of CPU

I first use cpulimit 

Would you know of a better way to limit them? It's in order to prevent 4
VMs from hogging all 4 of the hardware cores.

I also use all the libvirt tools, if there is any solution there.

Misaotra, Thanks, Merci.


-- 

   Architecte Informatique chez Blueline/Gulfsat:
Administration Systeme, Recherche  Developpement
+261 34 56 000 19
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v12 06/17] Use callback to deal with skb_release_data() specially.

2010-10-01 Thread David Miller
From: xiaohui@intel.com
Date: Thu, 30 Sep 2010 22:04:23 +0800

 @@ -197,10 +197,11 @@ struct skb_shared_info {
   union skb_shared_tx tx_flags;
   struct sk_buff  *frag_list;
   struct skb_shared_hwtstamps hwtstamps;
 - skb_frag_t  frags[MAX_SKB_FRAGS];
   /* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
   void *  destructor_arg;
 +
 + skb_frag_t  frags[MAX_SKB_FRAGS];
  };
  
  /* The structure is for a skb which pages may point to

Why are you moving frags[] to the end like this?
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: cpulimit and kvm process

2010-10-01 Thread pradeep
On Fri, 1 Oct 2010 10:03:28 +0300
Mihamina Rakotomandimby miham...@gulfsat.mg wrote:

 Manao ahoana, Hello, Bonjour,
 
 I would like to launch several KVM guests on a multicore CPU.
 The number of the KVM process is over the number of physical cores.
 
 I would like to limit each KVM process to say... 10% of CPU
 
 I first use cpulimit 
 
 Would you know some better way to limit them? it's in order to avoid 4
 VM to hog all the 4 hardware cores.
 
 I also use all the livbirt tools, if there si any sol
 
 Misaotra, Thanks, Merci.
 


You should be able to limit cpu utilization using cgroups



 

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


x64 guest on 32bit host?

2010-10-01 Thread Jun Koi
hi,

i am trying to create a 64bit VM on 32bit host. i use the following command:

kvm -cpu kvm64 

but then i got an error like this: "CPU is not 64-bit compatible"

i also tried with -cpu qemu64, but that didnt help.

so is it possible to run guest 64bit on host 32bit with KVM?

thanks,
Jun
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: x64 guest on 32bit host?

2010-10-01 Thread Alexander Graf

On 01.10.2010, at 13:14, Jun Koi wrote:

 hi,
 
 i am trying to create a 64bit VM on 32bit host. i use the following command:
 
 kvm -cpu kvm64 
 
 but then i got error like this CPU is not 64-bit compatible
 
 i also tried with -cpu qemu64, but that didnt help.
 
 so is it possible to run guest 64bit on host 32bit with KVM?

No.


Alex

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: TSC in nested SVM and VMX

2010-10-01 Thread Nadav Har'El
On Thu, Sep 30, 2010, Zachary Amsden wrote about Re: TSC in nested SVM and 
VMX:
 1)  When reading an MSR, we are not emulating the L2 guest; we are 
 DIRECTLY reading the MSR for the L1 emulation.  Any emulation of the L2 
 guest is actually done by the code running /inside/ the L1 emulation, so 
 MSR reads for the L2 guest are handed by L1, and MSR reads for the L1 
 guest are handled by L0, which is this code.
...
 So if we are currently running nested, the L1 tsc_offset is stored in 
 the nested.hsave field; the vmcb which is active is polluted by the L2 
 guest offset, which would be incorrect to return to the L1 emulation.

Thanks for the detailed explanation.

It seems, then, that the nested VMX logic is somewhat different from that
of the nested SVM. In nested VMX, if a function gets called when running
L1, the current VMCS will be that of L1 (aka vmcs01), not of its guest L2
(and I'm not even sure *which* L2 that would be when there are multiple
L2 guests for the one L1).

Nadav.

-- 
Nadav Har'El|  Friday, Oct  1 2010, 23 Tishri 5771
n...@math.technion.ac.il |-
Phone +972-523-790466, ICQ 13349191 |What's tiny, yellow and very dangerous? A
http://nadav.harel.org.il   |canary with the super-user password.
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


KVM CPU Numbering

2010-10-01 Thread Yushu Yao
Hi Experts,

 Question:

IN Qemu Monitor if i type info cpus the shown cpu list like:

QEMU 0.12.5 monitor - type 'help' for more information
(qemu) info cpus
* CPU #0: pc=0xfff0 thread_id=11170
  CPU #1: pc=0xfff0 (halted) thread_id=11171
  CPU #2: pc=0xfff0 (halted) thread_id=11172
  CPU #3: pc=0xfff0 (halted) thread_id=11173
  CPU #4: pc=0xfff0 (halted) thread_id=11174
  CPU #5: pc=0xfff0 (halted) thread_id=11175
  CPU #6: pc=0xfff0 (halted) thread_id=11176
  CPU #7: pc=0xfff0 (halted) thread_id=11177

Do these #0, #1, ... map to the numbering in /proc/cpuinfo of the
guest? (processor : xxx)

I am asking this because I'd like to pin the cores on a
hyper-threading host. In the guest I'm using -smp
8,cores=4,threads=2,sockets=1.

Only if I know which thread_id corresponds to which core in the guest,
can I pin the cores correctly.

Thanks a lot!

-Yushu
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: how to debug unhandled vm exit: 0x11?

2010-10-01 Thread Avi Kivity

 On 10/01/2010 08:16 AM, Neo Jia wrote:

On Wed, Sep 29, 2010 at 1:38 AM, Avi Kivitya...@redhat.com  wrote:
On 09/28/2010 08:40 PM, Neo Jia wrote:

  I found the instruction that caused this problem:

  emulation failed (failure) rip 71f14651 66 0f 7f 07

  And according to Intel, this is a MOVDQA. So, do we already have this
  instruction emulated as I am using a pretty old version of KVM
  (release 88)? If yes, could you point me to the file I need to look at
  for that specific patch?


  movdqa is not emulated.

I am going to give a try to emulate this instruction. BTW, do we have
any unit test for those emulated x86 instruction sets?


Indeed we do:

http://git.kernel.org/?p=virt/kvm/kvm-unit-tests.git;a=summary

Check out x86/emulator.c.

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: TSC in nested SVM and VMX

2010-10-01 Thread Alexander Graf

On 01.10.2010, at 13:21, Nadav Har'El wrote:

 On Thu, Sep 30, 2010, Zachary Amsden wrote about Re: TSC in nested SVM and 
 VMX:
 1)  When reading an MSR, we are not emulating the L2 guest; we are 
 DIRECTLY reading the MSR for the L1 emulation.  Any emulation of the L2 
 guest is actually done by the code running /inside/ the L1 emulation, so 
 MSR reads for the L2 guest are handed by L1, and MSR reads for the L1 
 guest are handled by L0, which is this code.
 ...
 So if we are currently running nested, the L1 tsc_offset is stored in 
 the nested.hsave field; the vmcb which is active is polluted by the L2 
 guest offset, which would be incorrect to return to the L1 emulation.
 
 Thanks for the detailed explanation.
 
 It seems, then, that the nested VMX logic is somewhat different from that
 of the nested SVM. In nested VMX, if a function gets called when running
 L1, the current VMCS will be that of L1 (aka vmcs01), not of its guest L2
 (and I'm not even sure *which* L2 that would be when there are multiple
 L2 guests for the one L1).

If the #vmexit comes while you're in L1, everything works on the L1's vmcb. If 
you hit it while in L2, everything works on the L2's vmcb unless special 
attention is taken.

The reason behind the TSC shift is very simple. With the tsc_offset setting 
we're trying to adjust the L1's offset. Adjusting the L1's offset means we need 
to adjust L1 and L2 alike, as the virtual L2's offset == L1 offset + vmcb L2 
offset, because L2's TSC is also offset by the amount L1 is.

So basically what happens is:

nested VMRUN:

svm-vmcb-control.tsc_offset += nested_vmcb-control.tsc_offset;

please note the +=!


svm_write_tsc_offset:

This gets called when we really want to set the current level's TSC offset only because 
the guest issued a tsc write. In L2 this means the L2's value.

if (is_nested(svm)) {
g_tsc_offset = svm-vmcb-control.tsc_offset -
   svm-nested.hsave-control.tsc_offset;

Remember the difference between L1 and L2.

svm-nested.hsave-control.tsc_offset = offset;

Set L1 to the new offset

}

svm-vmcb-control.tsc_offset = offset + g_tsc_offset;

Set L2 to new offset + delta.


So what this function does is that it treats TSC writes as L1 writes even while 
in L2 and adjusts L2 accordingly. Joerg, this sounds fishy to me. Are you sure 
this is intended and works when L1 doesn't intercept MSR writes to TSC?


svm_adjust_tsc_offset:

svm-vmcb-control.tsc_offset += adjustment;
if (is_nested(svm))
svm-nested.hsave-control.tsc_offset += adjustment;

Very simple case. We want to adjust L1's offset, so we need to adjust L1 and L2 
because the change is transparent to L2.


#VMEXIT:

/* Restore the original control entries */  
   
copy_vmcb_control_area(vmcb, hsave);
   

which again does:

dst-tsc_offset   = from-tsc_offset;

So we're setting the tsc offset to the value that's stored in the host save 
area.



Alex

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[ANNOUNCE] kvm-kmod-2.6.35.6

2010-10-01 Thread Jan Kiszka
Just a stable update from the 2.6.35 series.

KVM changes since kvm-kmod-2.6.35:
 - VMX: Fix host GDT.LIMIT corruption
 - MMU: fix mmu notifier invalidate handler for huge spte
 - x86: emulator: inc/dec can have lock prefix
 - MMU: fix direct sp's access corrupted
 - Prevent internal slots from being COWed
 - Keep slot ID in memory slot structure

kvm-kmod changes:
 - Warn about TXT conflict on kernels < 2.6.35

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[ANNOUNCE] kvm-kmod-2.6.36-rc6

2010-10-01 Thread Jan Kiszka
The KVM development for 2.6.36 settled, so it's time to roll out the
first kvm-kmod test version based on that kernel. The changelog might be
a bit boring - I was too lazy to dig for potential interesting details.

Major KVM changes since kvm-kmod-2.6.35.6:
 - No major features: mostly improved mmu and emulator correctness, some
   performance improvements, and support for guest XSAVE and AVX. [Avi]

kvm-kmod changes:
 - Warn about TXT conflict on kernels < 2.6.35
 - Remove some wrapping code for unsupported host kernels
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


VMX: System lock-up in guest mode, BIOS under suspect

2010-10-01 Thread Jan Kiszka
Hi,

for the past days I've been trying to understand a very strange hard
lock-up of some Intel i7 boxes when running our 16-bit guest OS under
KVM. After applying some instrumentation before and after the VM entry
(e.g. direct write to VGA memory), it turned out that the system is
apparently stuck inside guest mode!

I double-checked that VM exits on external IRQs and NMIs are properly
enabled in the VMCS - they are. I also tried to capture any potential
last words via serial console and even via remote DMA over Firewire) -
nothing. This likely means that not only the one core in guest mode is
stuck but all the others as well (note: the freeze is reproducible both
in UP and SMP mode). Very uncommon for an OS crash I would say...

So I decided to go for some nice conspiracy theory and put SMIs and
related BIOS code under suspect. Interestingly, this worked out:

After disabling all SMIs on my box (Fujitsu Celsius H700) via the
chipset register, the hard freezes no longer occurred up to now. My
customer was able to confirm this on some Lenovo Notebook as well. We
are currently collecting data about the affected systems to correlate
it, and we are performing longer test runs.

Nevertheless, I would like to collect some first comments on this. I'm
specifically wondering...

 - if there is anything the host OS can mess up to make VM exits crash
   on the way into SMM or out again (I cannot imagine as the SMM monitor
   should always be able to run, at least in the absence of CPU
   erratas).

 - what the SMM monitor could do wrong to cause such a crash,
   especially as it looks like the hardware does all the switching for
   it.

 - if there could still be some KVM crash around host-guest switching
   that just happens to be triggered by the SMI noise and that affects
   the whole system (including cores that do not host KVM threads).

Any ideas warmly welcome!

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux
--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: TSC in nested SVM and VMX

2010-10-01 Thread Zachary Amsden

On 10/01/2010 04:46 AM, Alexander Graf wrote:

On 01.10.2010, at 13:21, Nadav Har'El wrote:

   

On Thu, Sep 30, 2010, Zachary Amsden wrote about Re: TSC in nested SVM and 
VMX:
 

1)  When reading an MSR, we are not emulating the L2 guest; we are
DIRECTLY reading the MSR for the L1 emulation.  Any emulation of the L2
guest is actually done by the code running /inside/ the L1 emulation, so
MSR reads for the L2 guest are handed by L1, and MSR reads for the L1
guest are handled by L0, which is this code.
...
So if we are currently running nested, the L1 tsc_offset is stored in
the nested.hsave field; the vmcb which is active is polluted by the L2
guest offset, which would be incorrect to return to the L1 emulation.
   

Thanks for the detailed explanation.

It seems, then, that the nested VMX logic is somewhat different from that
of the nested SVM. In nested VMX, if a function gets called when running
L1, the current VMCS will be that of L1 (aka vmcs01), not of its guest L2
(and I'm not even sure *which* L2 that would be when there are multiple
L2 guests for the one L1).
 

If the #vmexit comes while you're in L1, everything works on the L1's vmcb. If 
you hit it while in L2, everything works on the L2's vmcb unless special 
attention is taken.

The reason behind the TSC shift is very simple. With the tsc_offset setting 
we're trying to adjust the L1's offset. Adjusting the L1's offset means we need 
to adjust L1 and L2 alike, as the virtual L2's offset == L1 offset + vmcb L2 
offset, because L2's TSC is also offset by the amount L1 is.

So basically what happens is:

nested VMRUN:

 svm-vmcb-control.tsc_offset += nested_vmcb-control.tsc_offset;

please note the +=!


svm_write_tsc_offset:

This gets called when we really want to set the current level's TSC offset only because 
the guest issued a tsc write. In L2 this means the L2's value.

 if (is_nested(svm)) {
 g_tsc_offset = svm-vmcb-control.tsc_offset -
svm-nested.hsave-control.tsc_offset;

Remember the difference between L1 and L2.

 svm-nested.hsave-control.tsc_offset = offset;

Set L1 to the new offset

 }

 svm-vmcb-control.tsc_offset = offset + g_tsc_offset;

Set L2 to new offset + delta.


So what this function does is that it treats TSC writes as L1 writes even while 
in L2 and adjusts L2 accordingly. Joerg, this sounds fishy to me. Are you sure 
this is intended and works when L1 doesn't intercept MSR writes to TSC?
   


L1 must intercept MSR writes to TSC for this to work.  It does, so all 
is well.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: KVM with hugepages generate huge load with two guests

2010-10-01 Thread Marcelo Tosatti
On Thu, Sep 30, 2010 at 12:07:15PM +0300, Dmitry Golubev wrote:
 Hi,
 
 I am not sure what's really happening, but every few hours
 (unpredictable) two virtual machines (Linux 2.6.32) start to generate
 huge cpu loads. It looks like some kind of loop is unable to complete
 or something...
 
 So the idea is:
 
 1. I have two linux 2.6.32 x64 (openvz, proxmox project) guests
 running on linux 2.6.35 x64 (ubuntu maverick) host with a Q6600
 Core2Quad on qemu-kvm 0.12.5 and libvirt 0.8.3 and another one small
 32bit linux virtual machine (16MB of ram) with a router inside (i
 doubt it contributes to the problem).
 
 2. All these machines use hufetlbfs. The server has 8GB of RAM, I
 reserved 3696 huge pages (page size is 2MB) on the server, and I am
 running the main guests each having 3550MB of virtual memory. The
 third guest, as I wrote before, takes 16MB of virtual memory.
 
 3. Once run, the guests reserve huge pages for themselves normally. As
 mem-prealloc is default, they grab all the memory they should have,
 leaving 6 pages unreserved (HugePages_Free - HugePages_Rsvd = 6) all
 times - so as I understand they should not want to get any more,
 right?
 
 4. All virtual machines run perfectly normal without any disturbances
 for few hours. They do not, however, use all their memory, so maybe
 the issue arises when they pass some kind of a threshold.
 
 5. At some point of time both guests exhibit cpu load over the top
 (16-24). At the same time, host works perfectly well, showing load of
 8 and that both kvm processes use CPU equally and fully. This point of
 time is unpredictable - it can be anything from one to twenty hours,
 but it will be less than a day. Sometimes the load disappears in a
 moment, but usually it stays like that, and everything works extremely
 slow (even a 'ps' command executes some 2-5 minutes).
 
 6. If I am patient, I can start rebooting the guest systems - once
 they have restarted, everything returns to normal. If I destroy one of
 the guests (virsh destroy), the other one starts working normally at
 once (!).
 
 I am relatively new to kvm and I am absolutely lost here. I have not
 experienced such problems before, but recently I upgraded from ubuntu
 lucid (I think it was linux 2.6.32, qemu-kvm 0.12.3 and libvirt 0.7.5)
 and started to use hugepages. These two virtual machines are not
 normally run on the same host system (i have a corosync/pacemaker
 cluster with drbd storage), but when one of the hosts is not
 available, they start running on the same host. That is the reason I
 have not noticed this earlier.
 
 Unfortunately, I don't have any spare hardware to experiment and this
 is a production system, so my debugging options are rather limited.
 
 Do you have any ideas, what could be wrong?

Is there swapping activity on the host when this happens? 

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] Monitor command to translate guest physical address to host virtual address

2010-10-01 Thread Marcelo Tosatti
On Fri, Oct 01, 2010 at 09:52:25AM +0800, Huang Ying wrote:
 From: Max Asbock masb...@linux.vnet.ibm.com
 
 Add command p2v to translate guest physical address to host virtual
 address.
 
 The p2v command provides one step in a chain of translations from
 guest virtual to guest physical to host virtual to host physical. Host
 physical is then used to inject a machine check error. As a
 consequence the HWPOISON code on the host and the MCE injection code
 in qemu-kvm are exercised.
 
 Signed-off-by: Max Asbock masb...@linux.vnet.ibm.com
 Signed-off-by: Jiajia Zheng jiajia.zh...@intel.com
 Signed-off-by: Huang Ying ying.hu...@intel.com

Added missing cpu_physical_memory_unmap and applied, thanks.

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: KVM with hugepages generate huge load with two guests

2010-10-01 Thread Dmitry Golubev
Hi,

Thanks for reply. Well, although there is plenty of RAM left (about
100MB), some swap space was used during the operation:

Mem:   8193472k total,  8089788k used,   103684k free, 5768k buffers
Swap: 11716412k total,36636k used, 11679776k free,   103112k cached

I am not sure why, though. Are you saying that there are bursts of
memory usage that push some pages to swap and they are not unswapped
although used? I will try to replicate the problem now and send you
some better printout from the moment the problem happens. I have not
noticed anything unusual when I was watching the system - there was
plenty of RAM free and a few megabytes in swap... Is there any kind of
check I can try during the problem occurring? Or should I free
50-100MB from hugepages and the system shall be stable again?

Thanks,
Dmitry

On Sat, Oct 2, 2010 at 1:30 AM, Marcelo Tosatti mtosa...@redhat.com wrote:
 On Thu, Sep 30, 2010 at 12:07:15PM +0300, Dmitry Golubev wrote:
 Hi,

 I am not sure what's really happening, but every few hours
 (unpredictable) two virtual machines (Linux 2.6.32) start to generate
 huge cpu loads. It looks like some kind of loop is unable to complete
 or something...

 So the idea is:

 1. I have two linux 2.6.32 x64 (openvz, proxmox project) guests
 running on linux 2.6.35 x64 (ubuntu maverick) host with a Q6600
 Core2Quad on qemu-kvm 0.12.5 and libvirt 0.8.3 and another one small
 32bit linux virtual machine (16MB of ram) with a router inside (i
 doubt it contributes to the problem).

 2. All these machines use hugetlbfs. The server has 8GB of RAM, I
 reserved 3696 huge pages (page size is 2MB) on the server, and I am
 running the main guests each having 3550MB of virtual memory. The
 third guest, as I wrote before, takes 16MB of virtual memory.

 3. Once run, the guests reserve huge pages for themselves normally. As
 mem-prealloc is default, they grab all the memory they should have,
 leaving 6 pages unreserved (HugePages_Free - HugePages_Rsvd = 6) all
 times - so as I understand they should not want to get any more,
 right?

 4. All virtual machines run perfectly normal without any disturbances
 for few hours. They do not, however, use all their memory, so maybe
 the issue arises when they pass some kind of a threshold.

 5. At some point of time both guests exhibit cpu load over the top
 (16-24). At the same time, host works perfectly well, showing load of
 8 and that both kvm processes use CPU equally and fully. This point of
 time is unpredictable - it can be anything from one to twenty hours,
 but it will be less than a day. Sometimes the load disappears in a
 moment, but usually it stays like that, and everything works extremely
 slow (even a 'ps' command executes some 2-5 minutes).

 6. If I am patient, I can start rebooting the guest systems - once
 they have restarted, everything returns to normal. If I destroy one of
 the guests (virsh destroy), the other one starts working normally at
 once (!).

 I am relatively new to kvm and I am absolutely lost here. I have not
 experienced such problems before, but recently I upgraded from ubuntu
 lucid (I think it was linux 2.6.32, qemu-kvm 0.12.3 and libvirt 0.7.5)
 and started to use hugepages. These two virtual machines are not
 normally run on the same host system (i have a corosync/pacemaker
 cluster with drbd storage), but when one of the hosts is not
 available, they start running on the same host. That is the reason I
 have not noticed this earlier.

 Unfortunately, I don't have any spare hardware to experiment and this
 is a production system, so my debugging options are rather limited.

 Do you have any ideas, what could be wrong?

 Is there swapping activity on the host when this happens?


--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: KVM with hugepages generate huge load with two guests

2010-10-01 Thread Dmitry Golubev
OK, I have repeated the problem. The two machines were working fine
for few hours without some services running (these would take up some
gigabyte additionally in total), I ran these services again and some
40 minutes later the problem reappeared (may be a coincidence, though,
but I don't think so). From top command output it looks like this:

top - 03:38:10 up 2 days, 20:08,  1 user,  load average: 9.60, 6.92, 5.36
Tasks: 143 total,   3 running, 140 sleeping,   0 stopped,   0 zombie
Cpu(s): 85.7%us,  4.2%sy,  0.0%ni,  0.0%id,  0.0%wa,  0.0%hi, 10.0%si,  0.0%st
Mem:   8193472k total,  8056700k used,   136772k free, 4912k buffers
Swap: 11716412k total,64884k used, 11651528k free,55640k cached

  PID USER  PR  NI  VIRT  RES  SHR S %CPU %MEMTIME+  COMMAND
21306 libvirt-  20   0 3781m  10m 2408 S  190  0.1  31:36.09 kvm
 4984 libvirt-  20   0 3771m  19m 1440 S  180  0.2 390:30.04 kvm

Comparing to the previous shot i sent before (that was taken few hours
ago), and you will not see much difference in my opinion.

Note that I have 8GB of RAM and totally both VMs take up 7GB. There is
nothing else running on the server, except the VMs and cluster
software (drbd, pacemaker etc). Right now the drbd sync process is
taking some cpu resources - that is why the libvirt processes do not
show as 200% (physically, it is a quad-core processor). Is almost 1GB
really not enough for KVM to support two 3.5GB guests? I see 136MB of
free memory right now - it is not even used...

Thanks,
Dmitry

On Sat, Oct 2, 2010 at 2:50 AM, Dmitry Golubev lastg...@gmail.com wrote:
 Hi,

 Thanks for reply. Well, although there is plenty of RAM left (about
 100MB), some swap space was used during the operation:

 Mem:   8193472k total,  8089788k used,   103684k free,     5768k buffers
 Swap: 11716412k total,    36636k used, 11679776k free,   103112k cached

 I am not sure why, though. Are you saying that there are bursts of
 memory usage that push some pages to swap and they are not unswapped
 although used? I will try to replicate the problem now and send you
 some better printout from the moment the problem happens. I have not
 noticed anything unusual when I was watching the system - there was
 plenty of RAM free and a few megabytes in swap... Is there any kind of
 check I can try during the problem occurring? Or should I free
 50-100MB from hugepages and the system shall be stable again?

 Thanks,
 Dmitry

 On Sat, Oct 2, 2010 at 1:30 AM, Marcelo Tosatti mtosa...@redhat.com wrote:
 On Thu, Sep 30, 2010 at 12:07:15PM +0300, Dmitry Golubev wrote:
 Hi,

 I am not sure what's really happening, but every few hours
 (unpredictable) two virtual machines (Linux 2.6.32) start to generate
 huge cpu loads. It looks like some kind of loop is unable to complete
 or something...

 So the idea is:

 1. I have two linux 2.6.32 x64 (openvz, proxmox project) guests
 running on linux 2.6.35 x64 (ubuntu maverick) host with a Q6600
 Core2Quad on qemu-kvm 0.12.5 and libvirt 0.8.3 and another one small
 32bit linux virtual machine (16MB of ram) with a router inside (i
 doubt it contributes to the problem).

 2. All these machines use hugetlbfs. The server has 8GB of RAM, I
 reserved 3696 huge pages (page size is 2MB) on the server, and I am
 running the main guests each having 3550MB of virtual memory. The
 third guest, as I wrote before, takes 16MB of virtual memory.

 3. Once run, the guests reserve huge pages for themselves normally. As
 mem-prealloc is default, they grab all the memory they should have,
 leaving 6 pages unreserved (HugePages_Free - HugePages_Rsvd = 6) all
 times - so as I understand they should not want to get any more,
 right?

 4. All virtual machines run perfectly normal without any disturbances
 for few hours. They do not, however, use all their memory, so maybe
 the issue arises when they pass some kind of a threshold.

 5. At some point of time both guests exhibit cpu load over the top
 (16-24). At the same time, host works perfectly well, showing load of
 8 and that both kvm processes use CPU equally and fully. This point of
 time is unpredictable - it can be anything from one to twenty hours,
 but it will be less than a day. Sometimes the load disappears in a
 moment, but usually it stays like that, and everything works extremely
 slow (even a 'ps' command executes some 2-5 minutes).

 6. If I am patient, I can start rebooting the guest systems - once
 they have restarted, everything returns to normal. If I destroy one of
 the guests (virsh destroy), the other one starts working normally at
 once (!).

 I am relatively new to kvm and I am absolutely lost here. I have not
 experienced such problems before, but recently I upgraded from ubuntu
 lucid (I think it was linux 2.6.32, qemu-kvm 0.12.3 and libvirt 0.7.5)
 and started to use hugepages. These two virtual machines are not
 normally run on the same host system (i have a corosync/pacemaker
 cluster with drbd storage), but when one of the hosts is not
 available, 

Re: 2.6.35-rc1 regression with pvclock and smp guests

2010-10-01 Thread Zachary Amsden

On 09/30/2010 01:07 PM, Michael Tokarev wrote:

01.10.2010 03:02, Michael Tokarev wrote:
   

30.09.2010 23:05, Marcelo Tosatti wrote:
[]
 

Arjan, Michael, can you try the following:

 From 3823c018162dc708b543cbdc680a4c7d63533fee Mon Sep 17 00:00:00 2001
From: Zachary Amsdenzams...@redhat.com
Date: Sat, 29 May 2010 17:52:46 -1000
Subject: [KVM V2 04/25] Fix SVM VMCB reset
Cc: Avi Kivitya...@redhat.com,
 Marcelo Tosattimtosa...@redhat.com,
 Glauber Costaglom...@redhat.com,
 linux-ker...@vger.kernel.org

On reset, VMCB TSC should be set to zero.  Instead, code was setting
tsc_offset to zero, which passes through the underlying TSC.

Signed-off-by: Zachary Amsdenzams...@redhat.com
---
  arch/x86/kvm/svm.c |2 +-
  1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 760c86e..46856d2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -781,7 +781,7 @@ static void init_vmcb(struct vcpu_svm *svm)

control-iopm_base_pa = iopm_base;
control-msrpm_base_pa = __pa(svm-msrpm);
-   control-tsc_offset = 0;
+   guest_write_tsc(svm-vcpu, 0);
control-int_ctl = V_INTR_MASKING_MASK;
   

This fails to compile on 2.6.35.5:

arch/x86/kvm/svm.c: In function ‘init_vmcb’:
arch/x86/kvm/svm.c:769: error: implicit declaration of function 
‘guest_write_tsc’

I'll take a look tomorrow where that comes from.. hopefully ;)
 

Ok, that routine is static, defined in arch/x86/kvm/vmx.c
(not svm.c).  I'm not sure it's ok to use it in svm.c
directly, as it appears to be vmx-specific.

Thanks!

/mjt
   



Can you try this patch to see if it helps?  I believe it is also safe 
for Xen, but cc'ing to double check.
Try to fix setup_percpu_clockdev by moving it before interrupts
are enabled.

Signed-off-by: Zachary Amsden zams...@redhat.com

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4..40a383b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -351,6 +351,8 @@ notrace static void __cpuinit start_secondary(void *unused)
unlock_vector_lock();
ipi_call_unlock();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+   x86_cpuinit.setup_percpu_clockev();
+
x86_platform.nmi_init();
 
/* enable local interrupts */
@@ -359,8 +361,6 @@ notrace static void __cpuinit start_secondary(void *unused)
/* to prevent fake stack check failure in clock setup */
boot_init_stack_canary();
 
-   x86_cpuinit.setup_percpu_clockev();
-
wmb();
cpu_idle();
 }