[Qemu-devel] [PATCH v3] virtio-pci: fix migration for pci bus master

2014-10-19 Thread Michael S. Tsirkin
Current support for bus mastering (clearing the OK bit), together with the need to
support guests which do not enable PCI bus mastering, leads to extra state in the
VIRTIO_PCI_FLAG_BUS_MASTER_BUG bit, which isn't robust for cross-version
migration when guests use the device before setting DRIVER_OK.

Rip out this code, and replace it:
-   Modern QEMU doesn't need VIRTIO_PCI_FLAG_BUS_MASTER_BUG,
so just drop it for the latest machine type.
-   For compat machine types, set PCI_COMMAND if DRIVER_OK
is set.

As this is needed for 2.1 for both pc and ppc, move PC_COMPAT macros from pc.h
to a new common header.
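
A minimal sketch of what the compat path boils down to at vmstate load -- an
approximation only, since the actual virtio-pci.c hunk is truncated below, not
the patch text itself: if the bus-master-bug-migration flag is set and the
incoming state shows the guest already at DRIVER_OK without bus mastering
enabled, force PCI_COMMAND_MASTER back on.

/* Sketch only, assuming the usual VirtIOPCIProxy/VirtIODevice fields;
 * not the exact hunk from this patch. */
static void virtio_pci_bus_master_compat(VirtIOPCIProxy *proxy,
                                         VirtIODevice *vdev)
{
    if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
        (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        /* Re-enable bus mastering on behalf of the guest. */
        pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                 proxy->pci_dev.config[PCI_COMMAND] |
                                 PCI_COMMAND_MASTER, 2);
    }
}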

Reviewed-by: Greg Kurz 
Tested-by: Greg Kurz 
Signed-off-by: Michael S. Tsirkin 
---

Alexander, could you pls ack me merging this?
Thanks!


changes from v2:
drop default = -1 from ppc - was a typo, reported by Greg

 hw/virtio/virtio-pci.h |  5 +
 include/hw/compat.h| 16 
 include/hw/i386/pc.h   | 10 ++
 hw/i386/pc_piix.c  |  2 +-
 hw/i386/pc_q35.c   |  2 +-
 hw/ppc/spapr.c |  7 +++
 hw/virtio/virtio-pci.c | 29 +++--
 7 files changed, 43 insertions(+), 28 deletions(-)
 create mode 100644 include/hw/compat.h

diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 1cea157..8873b6d 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -53,6 +53,11 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
 #define VIRTIO_PCI_BUS_CLASS(klass) \
 OBJECT_CLASS_CHECK(VirtioPCIBusClass, klass, TYPE_VIRTIO_PCI_BUS)
 
+/* Need to activate work-arounds for buggy guests at vmstate load. */
+#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT  0
+#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
+(1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
+
 /* Performance improves when virtqueue kick processing is decoupled from the
  * vcpu thread using ioeventfd for some devices. */
 #define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
diff --git a/include/hw/compat.h b/include/hw/compat.h
new file mode 100644
index 000..47f6ff5
--- /dev/null
+++ b/include/hw/compat.h
@@ -0,0 +1,16 @@
+#ifndef HW_COMPAT_H
+#define HW_COMPAT_H
+
+#define HW_COMPAT_2_1 \
+{\
+.driver   = "intel-hda",\
+.property = "old_msi_addr",\
+.value= "on",\
+},\
+{\
+.driver   = "virtio-pci",\
+.property = "virtio-pci-bus-master-bug-migration",\
+.value= "on",\
+}
+
+#endif /* HW_COMPAT_H */
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index bae023a..82ad046 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -14,6 +14,7 @@
 #include "sysemu/sysemu.h"
 #include "hw/pci/pci.h"
 #include "hw/boards.h"
+#include "hw/compat.h"
 
 #define HPET_INTCAP "hpet-intcap"
 
@@ -307,15 +308,8 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t);
 int e820_get_num_entries(void);
 bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
 
-#define PC_COMPAT_2_1 \
-{\
-.driver   = "intel-hda",\
-.property = "old_msi_addr",\
-.value= "on",\
-}
-
 #define PC_COMPAT_2_0 \
-PC_COMPAT_2_1, \
+HW_COMPAT_2_1, \
 {\
 .driver   = "virtio-scsi-pci",\
 .property = "any_layout",\
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 553afdd..a1634ab 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -502,7 +502,7 @@ static QEMUMachine pc_i440fx_machine_v2_1 = {
 .name = "pc-i440fx-2.1",
 .init = pc_init_pci,
 .compat_props = (GlobalProperty[]) {
-PC_COMPAT_2_1,
+HW_COMPAT_2_1,
 { /* end of list */ }
 },
 };
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index a199043..f330f7a 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -365,7 +365,7 @@ static QEMUMachine pc_q35_machine_v2_1 = {
 .name = "pc-q35-2.1",
 .init = pc_q35_init,
 .compat_props = (GlobalProperty[]) {
-PC_COMPAT_2_1,
+HW_COMPAT_2_1,
 { /* end of list */ }
 },
 };
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 2becc9f..623f626 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -57,6 +57,8 @@
 #include "trace.h"
 #include "hw/nmi.h"
 
+#include "hw/compat.h"
+
 #include 
 
 /* SLOF memory layout:
@@ -1689,10 +1691,15 @@ static const TypeInfo spapr_machine_info = {
 static void spapr_machine_2_1_class_init(ObjectClass *oc, void *data)
 {
 MachineClass *mc = MACHINE_CLASS(oc);
+static GlobalProperty compat_props[] = {
+HW_COMPAT_2_1,
+{ /* end of list */ }
+};
 
 mc->name = "pseries-2.1";
 mc->desc = "pSeries Logical Partition (PAPR compliant) v2.1";
 mc->is_default = 0;
+mc->compat_props = compat_props;
 }
 
 static const TypeInfo spapr_machine_2_1_info = {
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index a827cd4..a499a3c 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -86,9 +86,6

Re: [Qemu-devel] [PATCH 2/2] iotests: Add test for qcow2 L1 table update

2014-10-19 Thread Peter Lieven

On 16.10.2014 15:25, Max Reitz wrote:

Updating the L1 table should not result in random data being written.
This adds a test for that.

Signed-off-by: Max Reitz 
---
  tests/qemu-iotests/107 | 61 ++
  tests/qemu-iotests/107.out | 10 
  tests/qemu-iotests/group   |  1 +
  3 files changed, 72 insertions(+)
  create mode 100755 tests/qemu-iotests/107
  create mode 100644 tests/qemu-iotests/107.out

diff --git a/tests/qemu-iotests/107 b/tests/qemu-iotests/107
new file mode 100755
index 000..cad1cf9
--- /dev/null
+++ b/tests/qemu-iotests/107
@@ -0,0 +1,61 @@
+#!/bin/bash
+#
+# Tests updates of the qcow2 L1 table
+#
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see .
+#
+
+# creator
+owner=mre...@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+here="$PWD"
+tmp=/tmp/$$
+status=1   # failure is the default!
+
+_cleanup()
+{
+   _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2
+_supported_proto file


This (and maybe other recently added tests) also works on NFS.
As qcow2 on NFS might be a reasonable combination, I would add it.

Peter




Re: [Qemu-devel] [PATCH] fix the memory leak for share hugepage

2014-10-19 Thread Wen Congyang
On 10/20/2014 02:17 PM, Linhaifeng wrote:
> 
> 
> On 2014/10/20 13:32, Wen Congyang wrote:
>> On 10/20/2014 12:48 PM, Linhaifeng wrote:
>>>
>>>
>>> On 2014/10/20 10:12, Wen Congyang wrote:
 On 10/18/2014 11:20 AM, Linhaifeng wrote:
>
>
> On 2014/10/17 21:26, Daniel P. Berrange wrote:
>> On Fri, Oct 17, 2014 at 04:57:27PM +0800, Linhaifeng wrote:
>>>
>>>
>>> On 2014/10/17 16:33, Daniel P. Berrange wrote:
 On Fri, Oct 17, 2014 at 04:27:17PM +0800, haifeng@huawei.com wrote:
> From: linhaifeng 
>
> The VM start with share hugepage should close the hugefile fd
> when exit.Because the hugepage fd may be send to other process
> e.g vhost-user If qemu not close the fd the other process can
> not free the hugepage otherwise exit process,this is ugly,so
> qemu should close all shared fd when exit.
>
> Signed-off-by: linhaifeng 

 Err, all file descriptors are closed automatically when a process
 exits. So manually calling close(fd) before exit can't have any
 functional effect on a resource leak.

 If QEMU has sent the FD to another process, that process has a
 completely separate copy of the FD. Closing the FD in QEMU will
 not close the FD in the other process. You need the other process
 to exit for the copy to be closed.

 Regards,
 Daniel

>>> Hi,daniel
>>>
>>> QEMU send the fd by unix domain socket.unix domain socket just install 
>>> the fd to
>>> other process and inc the f_count,if qemu not close the fd the f_count 
>>> is not dec.
>>> Then the other process even close the fd the hugepage would not freed 
>>> whise the other process exit.
>>
>> The kernel always closes all FDs when a process exits. So if this FD is
>> not being correctly closed then it is a kernel bug. There should never
>> be any reason for an application to do close(fd) before exiting.
>>
>> Regards,
>> Daniel
>>
> Hi,daniel
>
> I don't think this is kernel's bug.May be this a problem about usage.
> If you open a file you should close it too.

 If you don't close it, the kernel will help you when the program exits.

>>> Yes,when the hugepage is only used for qemu,the kernel will free the file 
>>> object.If the hugepage shared for other process,when qemu exit the kernel 
>>> will not free the file.
>>
>> Even if the hugepage is shared with the other process, the kernel will auto 
>> close the fd when qemu
>> exits. If the kernel doesn't do it, it is a kernel bug.
>>
> Kernel supply close to fix this bug.If you call open you must call close.
> If not, the result is unpredictability.

No, if the program exits, the kernel must close all fds used by the program.
So there is no need to close the fds before the program exits.

Thanks
Wen Congyang

>
> This is <>about how to free resource of file.
> http://linux.die.net/man/2/close
>
>
> I'm trying to describe my problem.
>
> For example, there are 2 VMs run with hugepage and the hugepage only for 
> QEMU to use.
>
> Before run VM the meminfo is :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
>
> Run the two VMs.QEMU deal with hugepage as follow steps:
> 1.open
> 2.unlink
> 3.mmap
> 4.use memory of hugepage.After this step the meminfo is :
> HugePages_Total:4096
> HugePages_Free:0
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 5.shutdown VM with signal 15 without close(fd).After this step the 
> meminfo is :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
>
> Yes,it works well,like you said the kernel recycle all resources.
>
> For another example,there are 2 VMs run with hugepage and share the 
> hugepage with vapp(a vhost-user application).

 The vapp is your internal application?

>>> Yes vapp is a application to share the QEMU's hugepage.So threr are two 
>>> process use the hugepage.
>>>
>
> Before run VM the meminfo is :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
>
> Run the first VM.QEMU deal with hugepage as follow steps:
> 1.open
> 2.unlink
> 3.mmap
> 4.use memory of hugepage and send the fd to vapp with unix domain 
> socket.After this step the meminfo is:
>>
>> Do you modify qemu?
>>
> HugePages_Total:4096
> HugePages_Free: 2048
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
>

Re: [Qemu-devel] [PATCH 1/2] qcow2: Do not overflow when writing an L1 sector

2014-10-19 Thread Peter Lieven

On 16.10.2014 15:25, Max Reitz wrote:

While writing an L1 table sector, qcow2_write_l1_entry() copies the
respective range from s->l1_table to the local "buf" array. The size of
s->l1_table does not have to be a multiple of L1_ENTRIES_PER_SECTOR;
thus, limit the index which is used for copying all entries to the L1
size.
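
(As an illustration with assumed numbers, not text from the patch: with the
usual 512-byte sectors and 8-byte L1 entries, L1_ENTRIES_PER_SECTOR is 64, so
for an image whose l1_size is, say, 5, the old loop would copy
s->l1_table[5..63] -- entries past the last valid one -- into buf, which is
presumably the random data the new iotest in patch 2/2 guards against; hence
also the new "= { 0 }" initializer for buf.)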

Cc: qemu-sta...@nongnu.org
Signed-off-by: Max Reitz 
---
  block/qcow2-cluster.c | 6 --
  1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index f7dd8c0..4d888c7 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -164,12 +164,14 @@ static int l2_load(BlockDriverState *bs, uint64_t 
l2_offset,
  int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
  {
  BDRVQcowState *s = bs->opaque;
-uint64_t buf[L1_ENTRIES_PER_SECTOR];
+uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
  int l1_start_index;
  int i, ret;
  
  l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);

-for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
+for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
+ i++)
+{
  buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
  }
  


Reviewed-by: Peter Lieven 




Re: [Qemu-devel] [PATCH] fix the memory leak for share hugepage

2014-10-19 Thread Linhaifeng


On 2014/10/20 13:32, Wen Congyang wrote:
> On 10/20/2014 12:48 PM, Linhaifeng wrote:
>>
>>
>> On 2014/10/20 10:12, Wen Congyang wrote:
>>> On 10/18/2014 11:20 AM, Linhaifeng wrote:


 On 2014/10/17 21:26, Daniel P. Berrange wrote:
> On Fri, Oct 17, 2014 at 04:57:27PM +0800, Linhaifeng wrote:
>>
>>
>> On 2014/10/17 16:33, Daniel P. Berrange wrote:
>>> On Fri, Oct 17, 2014 at 04:27:17PM +0800, haifeng@huawei.com wrote:
 From: linhaifeng 

 The VM start with share hugepage should close the hugefile fd
 when exit.Because the hugepage fd may be send to other process
 e.g vhost-user If qemu not close the fd the other process can
 not free the hugepage otherwise exit process,this is ugly,so
 qemu should close all shared fd when exit.

 Signed-off-by: linhaifeng 
>>>
>>> Err, all file descriptors are closed automatically when a process
>>> exits. So manually calling close(fd) before exit can't have any
>>> functional effect on a resource leak.
>>>
>>> If QEMU has sent the FD to another process, that process has a
>>> completely separate copy of the FD. Closing the FD in QEMU will
>>> not close the FD in the other process. You need the other process
>>> to exit for the copy to be closed.
>>>
>>> Regards,
>>> Daniel
>>>
>> Hi,daniel
>>
>> QEMU send the fd by unix domain socket.unix domain socket just install 
>> the fd to
>> other process and inc the f_count,if qemu not close the fd the f_count 
>> is not dec.
>> Then the other process even close the fd the hugepage would not freed 
>> whise the other process exit.
>
> The kernel always closes all FDs when a process exits. So if this FD is
> not being correctly closed then it is a kernel bug. There should never
> be any reason for an application to do close(fd) before exiting.
>
> Regards,
> Daniel
>
 Hi,daniel

 I don't think this is kernel's bug.May be this a problem about usage.
 If you open a file you should close it too.
>>>
>>> If you don't close it, the kernel will help you when the program exits.
>>>
>> Yes,when the hugepage is only used for qemu,the kernel will free the file 
>> object.If the hugepage shared for other process,when qemu exit the kernel 
>> will not free the file.
> 
> Even if the hugepage is shared with the other process, the kernel will auto 
> close the fd when qemu
> exits. If the kernel doesn't do it, it is a kernel bug.
> 
The kernel supplies close() to fix this bug. If you call open() you must call close();
if not, the result is unpredictable.

 This is <>about how to free resource of file.
 http://linux.die.net/man/2/close


 I'm trying to describe my problem.

 For example, there are 2 VMs run with hugepage and the hugepage only for 
 QEMU to use.

 Before run VM the meminfo is :
 HugePages_Total:4096
 HugePages_Free: 4096
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB

 Run the two VMs.QEMU deal with hugepage as follow steps:
 1.open
 2.unlink
 3.mmap
 4.use memory of hugepage.After this step the meminfo is :
 HugePages_Total:4096
 HugePages_Free:0
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB
 5.shutdown VM with signal 15 without close(fd).After this step the meminfo 
 is :
 HugePages_Total:4096
 HugePages_Free: 4096
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB

 Yes,it works well,like you said the kernel recycle all resources.

 For another example,there are 2 VMs run with hugepage and share the 
 hugepage with vapp(a vhost-user application).
>>>
>>> The vapp is your internal application?
>>>
>> Yes vapp is a application to share the QEMU's hugepage.So threr are two 
>> process use the hugepage.
>>

 Before run VM the meminfo is :
 HugePages_Total:4096
 HugePages_Free: 4096
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB

 Run the first VM.QEMU deal with hugepage as follow steps:
 1.open
 2.unlink
 3.mmap
 4.use memory of hugepage and send the fd to vapp with unix domain 
 socket.After this step the meminfo is:
> 
> Do you modify qemu?
> 
 HugePages_Total:4096
 HugePages_Free: 2048
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB

 Run the second VM.After this step the meminfo is:
 HugePages_Total:4096
 HugePages_Free:0
 HugePages_Rsvd:0
 HugePages_Surp:0
 Hugepagesize:   2048 kB

 Then I want to close the first VM and run another VM.After close the first 
 VM and close the fd in vapp the meminf

[Qemu-devel] [PATCH] block: add a knob to disable multiwrite_merge

2014-10-19 Thread Peter Lieven
The block layer has silently merged write requests since
commit 40b4f539. This patch adds a knob to disable
this feature, as there has been some discussion lately
about whether multiwrite is a good idea at all and
because it skews benchmarks.
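
(Presumably the knob would then be toggled per drive on the command line, e.g.
something like write-merging=off in a -drive option, with the default staying
on as per the qemu_common_drive_opts entry below; that usage is inferred from
the patch, not taken from it.)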

Signed-off-by: Peter Lieven 
---
 block.c   |4 
 block/qapi.c  |1 +
 blockdev.c|7 +++
 hmp.c |4 
 include/block/block_int.h |1 +
 qapi/block-core.json  |   10 +-
 qemu-options.hx   |1 +
 qmp-commands.hx   |2 ++
 8 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/block.c b/block.c
index 27533f3..1658a72 100644
--- a/block.c
+++ b/block.c
@@ -4531,6 +4531,10 @@ static int multiwrite_merge(BlockDriverState *bs, 
BlockRequest *reqs,
 {
 int i, outidx;
 
+if (!bs->write_merging) {
+return num_reqs;
+}
+
 // Sort requests by start sector
 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
 
diff --git a/block/qapi.c b/block/qapi.c
index 9733ebd..02251dd 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -58,6 +58,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs)
 
 info->backing_file_depth = bdrv_get_backing_file_depth(bs);
 info->detect_zeroes = bs->detect_zeroes;
+info->write_merging = bs->write_merging;
 
 if (bs->io_limits_enabled) {
 ThrottleConfig cfg;
diff --git a/blockdev.c b/blockdev.c
index e595910..13e47b8 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -378,6 +378,7 @@ static DriveInfo *blockdev_init(const char *file, QDict 
*bs_opts,
 const char *id;
 bool has_driver_specific_opts;
 BlockdevDetectZeroesOptions detect_zeroes;
+bool write_merging;
 BlockDriver *drv = NULL;
 
 /* Check common options by copying from bs_opts to opts, all other options
@@ -405,6 +406,7 @@ static DriveInfo *blockdev_init(const char *file, QDict 
*bs_opts,
 snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
 ro = qemu_opt_get_bool(opts, "read-only", 0);
 copy_on_read = qemu_opt_get_bool(opts, "copy-on-read", false);
+write_merging = qemu_opt_get_bool(opts, "write-merging", true);
 
 if ((buf = qemu_opt_get(opts, "discard")) != NULL) {
 if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) {
@@ -530,6 +532,7 @@ static DriveInfo *blockdev_init(const char *file, QDict 
*bs_opts,
 bs->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0;
 bs->read_only = ro;
 bs->detect_zeroes = detect_zeroes;
+bs->write_merging = write_merging;
 
 bdrv_set_on_error(bs, on_read_error, on_write_error);
 
@@ -2746,6 +2749,10 @@ QemuOptsList qemu_common_drive_opts = {
 .name = "detect-zeroes",
 .type = QEMU_OPT_STRING,
 .help = "try to optimize zero writes (off, on, unmap)",
+},{
+.name = "write-merging",
+.type = QEMU_OPT_BOOL,
+.help = "enable write merging (default: true)",
 },
 { /* end of list */ }
 },
diff --git a/hmp.c b/hmp.c
index 63d7686..8d6ad0b 100644
--- a/hmp.c
+++ b/hmp.c
@@ -348,6 +348,10 @@ void hmp_info_block(Monitor *mon, const QDict *qdict)

BlockdevDetectZeroesOptions_lookup[info->value->inserted->detect_zeroes]);
 }
 
+if (!info->value->inserted->write_merging) {
+monitor_printf(mon, "Write Merging:off\n");
+}
+
 if (info->value->inserted->bps
 || info->value->inserted->bps_rd
 || info->value->inserted->bps_wr
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 8d86a6c..39bbde2 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -407,6 +407,7 @@ struct BlockDriverState {
 
 QDict *options;
 BlockdevDetectZeroesOptions detect_zeroes;
+bool write_merging;
 
 /* The error object in use for blocking operations on backing_hd */
 Error *backing_blocker;
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 8f7089e..4931bd9 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -214,6 +214,8 @@
 #
 # @detect_zeroes: detect and optimize zero writes (Since 2.1)
 #
+# @write_merging: true if write merging is enabled (Since 2.2)
+#
 # @bps: total throughput limit in bytes per second is specified
 #
 # @bps_rd: read throughput limit in bytes per second is specified
@@ -250,6 +252,7 @@
 '*backing_file': 'str', 'backing_file_depth': 'int',
 'encrypted': 'bool', 'encryption_key_missing': 'bool',
 'detect_zeroes': 'BlockdevDetectZeroesOptions',
+'write_merging': 'bool',
 'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
 'image': 'ImageInfo',
@@ -1180,6 +1183,10 @@
 # (default: false)
 # @detect-zeroes: #optional detect and optimize zero writes (Since 2.1)
 # (default: off)
+# @write-merging: #optiona

Re: [Qemu-devel] [PATCH 1/2] i386: Add a Virtual Machine Generation ID device

2014-10-19 Thread Gal Hammer

On 19/10/2014 18:14, Michael S. Tsirkin wrote:

On Sun, Oct 19, 2014 at 04:43:07PM +0300, Gal Hammer wrote:

Based on Microsoft's sepecifications (paper can be dowloaded from
http://go.microsoft.com/fwlink/?LinkId=260709), add a device
description to the SSDT ACPI table and its implementation.

The GUID is set using a global "vmgenid.uuid" parameter.

Signed-off-by: Gal Hammer 

---
  default-configs/i386-softmmu.mak |   1 +
  default-configs/x86_64-softmmu.mak   |   1 +
  hw/acpi/core.c   |   8 +++
  hw/acpi/ich9.c   |   8 +++
  hw/acpi/piix4.c  |   8 +++
  hw/i386/acpi-build.c |   8 +++
  hw/i386/acpi-dsdt.dsl|   4 +-
  hw/i386/acpi-dsdt.hex.generated  |   6 +-
  hw/i386/pc.c |   8 +++
  hw/i386/q35-acpi-dsdt.dsl|   5 +-
  hw/i386/q35-acpi-dsdt.hex.generated  |   8 +--
  hw/i386/ssdt-misc.dsl|  36 +++
  hw/i386/ssdt-misc.hex.generated  |   8 +--
  hw/isa/lpc_ich9.c|   1 +
  hw/misc/Makefile.objs|   1 +
  hw/misc/vmgenid.c| 116 +++
  include/hw/acpi/acpi.h   |   2 +
  include/hw/acpi/acpi_dev_interface.h |   4 ++
  include/hw/acpi/ich9.h   |   2 +
  include/hw/i386/pc.h |   3 +
  include/hw/misc/vmgenid.h|  21 +++
  21 files changed, 246 insertions(+), 13 deletions(-)
  create mode 100644 hw/misc/vmgenid.c
  create mode 100644 include/hw/misc/vmgenid.h


Please document the host/guest API.
It seems that you are using a hard-coded hardware address,
and using up a GPE.


I'll add a document file which describes the device's implementation.






diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak
index 8e08841..bd33c75 100644
--- a/default-configs/i386-softmmu.mak
+++ b/default-configs/i386-softmmu.mak
@@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
  CONFIG_ICC_BUS=y
  CONFIG_PVPANIC=y
  CONFIG_MEM_HOTPLUG=y
+CONFIG_VMGENID=y
diff --git a/default-configs/x86_64-softmmu.mak 
b/default-configs/x86_64-softmmu.mak
index 66557ac..006fc7c 100644
--- a/default-configs/x86_64-softmmu.mak
+++ b/default-configs/x86_64-softmmu.mak
@@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
  CONFIG_ICC_BUS=y
  CONFIG_PVPANIC=y
  CONFIG_MEM_HOTPLUG=y
+CONFIG_VMGENID=y
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index a7368fb..a01c980 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -28,6 +28,8 @@
  #include "qapi-visit.h"
  #include "qapi-event.h"

+#define ACPI_VM_GENERATION_ID_CHANGED_STATUS 1
+
  struct acpi_table_header {
  uint16_t _length; /* our length, not actual part of the hdr */
/* allows easier parsing for fw_cfg clients */
@@ -680,3 +682,9 @@ void acpi_update_sci(ACPIREGS *regs, qemu_irq irq)
 (regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) &&
 !(pm1a_sts & ACPI_BITMASK_TIMER_STATUS));
  }
+
+void acpi_vm_generation_id_changed(ACPIREGS *acpi_regs, qemu_irq irq)
+{
+acpi_regs->gpe.sts[0] |= ACPI_VM_GENERATION_ID_CHANGED_STATUS;
+acpi_update_sci(acpi_regs, irq);
+}
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 7b14bbb..5501c0e 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -316,3 +316,11 @@ void ich9_pm_ospm_status(AcpiDeviceIf *adev, 
ACPIOSTInfoList ***list)

  acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list);
  }
+
+void ich9_vm_generation_id_changed(AcpiDeviceIf *adev)
+{
+ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
+ICH9LPCPMRegs *pm = &s->pm;
+
+acpi_vm_generation_id_changed(&pm->acpi_regs, pm->irq);
+}
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 0bfa814..ad0ef68 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -580,6 +580,13 @@ static void piix4_ospm_status(AcpiDeviceIf *adev, 
ACPIOSTInfoList ***list)
  acpi_memory_ospm_status(&s->acpi_memory_hotplug, list);
  }

+static void piix4_vm_generation_id_changed(AcpiDeviceIf *adev)
+{
+PIIX4PMState *s = PIIX4_PM(adev);
+
+acpi_vm_generation_id_changed(&s->ar, s->irq);
+}
+
  static Property piix4_pm_properties[] = {
  DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0),
  DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0),
@@ -617,6 +624,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void 
*data)
  hc->plug = piix4_device_plug_cb;
  hc->unplug_request = piix4_device_unplug_request_cb;
  adevc->ospm_status = piix4_ospm_status;
+adevc->vm_generation_id_changed = piix4_vm_generation_id_changed;
  }

  static const TypeInfo piix4_pm_info = {
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 00be4bb..27d0494 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -42,6 +42,7 @@
  #include "hw/acpi/memory_hotplug.h"
  #include "sysemu/tpm.h"
  #include "hw/acpi/tpm.h"
+#include "hw/misc/vmgenid.h"

  /* Supported chipsets: 

Re: [Qemu-devel] [PATCH] fix the memory leak for share hugepage

2014-10-19 Thread Wen Congyang
On 10/20/2014 12:48 PM, Linhaifeng wrote:
> 
> 
> On 2014/10/20 10:12, Wen Congyang wrote:
>> On 10/18/2014 11:20 AM, Linhaifeng wrote:
>>>
>>>
>>> On 2014/10/17 21:26, Daniel P. Berrange wrote:
 On Fri, Oct 17, 2014 at 04:57:27PM +0800, Linhaifeng wrote:
>
>
> On 2014/10/17 16:33, Daniel P. Berrange wrote:
>> On Fri, Oct 17, 2014 at 04:27:17PM +0800, haifeng@huawei.com wrote:
>>> From: linhaifeng 
>>>
>>> The VM start with share hugepage should close the hugefile fd
>>> when exit.Because the hugepage fd may be send to other process
>>> e.g vhost-user If qemu not close the fd the other process can
>>> not free the hugepage otherwise exit process,this is ugly,so
>>> qemu should close all shared fd when exit.
>>>
>>> Signed-off-by: linhaifeng 
>>
>> Err, all file descriptors are closed automatically when a process
>> exits. So manually calling close(fd) before exit can't have any
>> functional effect on a resource leak.
>>
>> If QEMU has sent the FD to another process, that process has a
>> completely separate copy of the FD. Closing the FD in QEMU will
>> not close the FD in the other process. You need the other process
>> to exit for the copy to be closed.
>>
>> Regards,
>> Daniel
>>
> Hi,daniel
>
> QEMU send the fd by unix domain socket.unix domain socket just install 
> the fd to
> other process and inc the f_count,if qemu not close the fd the f_count is 
> not dec.
> Then the other process even close the fd the hugepage would not freed 
> whise the other process exit.

 The kernel always closes all FDs when a process exits. So if this FD is
 not being correctly closed then it is a kernel bug. There should never
 be any reason for an application to do close(fd) before exiting.

 Regards,
 Daniel

>>> Hi,daniel
>>>
>>> I don't think this is kernel's bug.May be this a problem about usage.
>>> If you open a file you should close it too.
>>
>> If you don't close it, the kernel will help you when the program exits.
>>
> Yes,when the hugepage is only used for qemu,the kernel will free the file 
> object.If the hugepage shared for other process,when qemu exit the kernel 
> will not free the file.

Even if the hugepage is shared with the other process, the kernel will auto 
close the fd when qemu
exits. If the kernel doesn't do it, it is a kernel bug.

>>>
>>> This is <>about how to free resource of file.
>>> http://linux.die.net/man/2/close
>>>
>>>
>>> I'm trying to describe my problem.
>>>
>>> For example, there are 2 VMs run with hugepage and the hugepage only for 
>>> QEMU to use.
>>>
>>> Before run VM the meminfo is :
>>> HugePages_Total:4096
>>> HugePages_Free: 4096
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>>
>>> Run the two VMs.QEMU deal with hugepage as follow steps:
>>> 1.open
>>> 2.unlink
>>> 3.mmap
>>> 4.use memory of hugepage.After this step the meminfo is :
>>> HugePages_Total:4096
>>> HugePages_Free:0
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>> 5.shutdown VM with signal 15 without close(fd).After this step the meminfo 
>>> is :
>>> HugePages_Total:4096
>>> HugePages_Free: 4096
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>>
>>> Yes,it works well,like you said the kernel recycle all resources.
>>>
>>> For another example,there are 2 VMs run with hugepage and share the 
>>> hugepage with vapp(a vhost-user application).
>>
>> The vapp is your internal application?
>>
> Yes vapp is a application to share the QEMU's hugepage.So threr are two 
> process use the hugepage.
> 
>>>
>>> Before run VM the meminfo is :
>>> HugePages_Total:4096
>>> HugePages_Free: 4096
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>>
>>> Run the first VM.QEMU deal with hugepage as follow steps:
>>> 1.open
>>> 2.unlink
>>> 3.mmap
>>> 4.use memory of hugepage and send the fd to vapp with unix domain 
>>> socket.After this step the meminfo is:

Do you modify qemu?

>>> HugePages_Total:4096
>>> HugePages_Free: 2048
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>>
>>> Run the second VM.After this step the meminfo is:
>>> HugePages_Total:4096
>>> HugePages_Free:0
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>>
>>> Then I want to close the first VM and run another VM.After close the first 
>>> VM and close the fd in vapp the meminfo is :
>>> HugePages_Total:4096
>>> HugePages_Free:0
>>> HugePages_Rsvd:0
>>> HugePages_Surp:0
>>> Hugepagesize:   2048 kB
>>
>> Does the qemu still run after you close the first VM? If the qemu exits, the 
>> fd will be closed by the kernel, so this
>> bug is very strange.

Re: [Qemu-devel] [PATCH] fix the memory leak for share hugepage

2014-10-19 Thread Linhaifeng


On 2014/10/20 10:12, Wen Congyang wrote:
> On 10/18/2014 11:20 AM, Linhaifeng wrote:
>>
>>
>> On 2014/10/17 21:26, Daniel P. Berrange wrote:
>>> On Fri, Oct 17, 2014 at 04:57:27PM +0800, Linhaifeng wrote:


 On 2014/10/17 16:33, Daniel P. Berrange wrote:
> On Fri, Oct 17, 2014 at 04:27:17PM +0800, haifeng@huawei.com wrote:
>> From: linhaifeng 
>>
>> The VM start with share hugepage should close the hugefile fd
>> when exit.Because the hugepage fd may be send to other process
>> e.g vhost-user If qemu not close the fd the other process can
>> not free the hugepage otherwise exit process,this is ugly,so
>> qemu should close all shared fd when exit.
>>
>> Signed-off-by: linhaifeng 
>
> Err, all file descriptors are closed automatically when a process
> exits. So manually calling close(fd) before exit can't have any
> functional effect on a resource leak.
>
> If QEMU has sent the FD to another process, that process has a
> completely separate copy of the FD. Closing the FD in QEMU will
> not close the FD in the other process. You need the other process
> to exit for the copy to be closed.
>
> Regards,
> Daniel
>
 Hi,daniel

 QEMU send the fd by unix domain socket.unix domain socket just install the 
 fd to
 other process and inc the f_count,if qemu not close the fd the f_count is 
 not dec.
 Then the other process even close the fd the hugepage would not freed 
 whise the other process exit.
>>>
>>> The kernel always closes all FDs when a process exits. So if this FD is
>>> not being correctly closed then it is a kernel bug. There should never
>>> be any reason for an application to do close(fd) before exiting.
>>>
>>> Regards,
>>> Daniel
>>>
>> Hi,daniel
>>
>> I don't think this is kernel's bug.May be this a problem about usage.
>> If you open a file you should close it too.
> 
> If you don't close it, the kernel will help you when the program exits.
> 
Yes, when the hugepage is only used by qemu, the kernel will free the file
object. If the hugepage is shared with another process, the kernel will not
free the file when qemu exits.
>>
>> This is <>about how to free resource of file.
>> http://linux.die.net/man/2/close
>>
>>
>> I'm trying to describe my problem.
>>
>> For example, there are 2 VMs run with hugepage and the hugepage only for 
>> QEMU to use.
>>
>> Before run VM the meminfo is :
>> HugePages_Total:4096
>> HugePages_Free: 4096
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>>
>> Run the two VMs.QEMU deal with hugepage as follow steps:
>> 1.open
>> 2.unlink
>> 3.mmap
>> 4.use memory of hugepage.After this step the meminfo is :
>> HugePages_Total:4096
>> HugePages_Free:0
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>> 5.shutdown VM with signal 15 without close(fd).After this step the meminfo 
>> is :
>> HugePages_Total:4096
>> HugePages_Free: 4096
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>>
>> Yes,it works well,like you said the kernel recycle all resources.
>>
>> For another example,there are 2 VMs run with hugepage and share the hugepage 
>> with vapp(a vhost-user application).
> 
> The vapp is your internal application?
> 
Yes, vapp is an application that shares QEMU's hugepages, so there are two
processes using the hugepage.

>>
>> Before run VM the meminfo is :
>> HugePages_Total:4096
>> HugePages_Free: 4096
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>>
>> Run the first VM.QEMU deal with hugepage as follow steps:
>> 1.open
>> 2.unlink
>> 3.mmap
>> 4.use memory of hugepage and send the fd to vapp with unix domain 
>> socket.After this step the meminfo is:
>> HugePages_Total:4096
>> HugePages_Free: 2048
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>>
>> Run the second VM.After this step the meminfo is:
>> HugePages_Total:4096
>> HugePages_Free:0
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
>>
>> Then I want to close the first VM and run another VM.After close the first 
>> VM and close the fd in vapp the meminfo is :
>> HugePages_Total:4096
>> HugePages_Free:0
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   2048 kB
> 
> Does the qemu still run after you close the first VM? If the qemu exits, the 
> fd will be closed by the kernel, so this
> bug is very strange.
> 
qemu is no longer running after I close the first VM. If another process is
still using the file, will it be closed by the kernel too?

>>
>> So failed to run the third VM because the first VM have not free the 
>> hugepage.After apply this patch the meminfo is:
>> HugePages_Total:4096
>> HugePages_Free: 2048
>> HugePages_Rsvd:0
>> HugePages_Surp:0
>> Hugepagesize:   

Re: [Qemu-devel] [PATCH] fix the memory leak for share hugepage

2014-10-19 Thread Wen Congyang
On 10/18/2014 11:20 AM, Linhaifeng wrote:
> 
> 
> On 2014/10/17 21:26, Daniel P. Berrange wrote:
>> On Fri, Oct 17, 2014 at 04:57:27PM +0800, Linhaifeng wrote:
>>>
>>>
>>> On 2014/10/17 16:33, Daniel P. Berrange wrote:
 On Fri, Oct 17, 2014 at 04:27:17PM +0800, haifeng@huawei.com wrote:
> From: linhaifeng 
>
> The VM start with share hugepage should close the hugefile fd
> when exit.Because the hugepage fd may be send to other process
> e.g vhost-user If qemu not close the fd the other process can
> not free the hugepage otherwise exit process,this is ugly,so
> qemu should close all shared fd when exit.
>
> Signed-off-by: linhaifeng 

 Err, all file descriptors are closed automatically when a process
 exits. So manually calling close(fd) before exit can't have any
 functional effect on a resource leak.

 If QEMU has sent the FD to another process, that process has a
 completely separate copy of the FD. Closing the FD in QEMU will
 not close the FD in the other process. You need the other process
 to exit for the copy to be closed.

 Regards,
 Daniel

>>> Hi,daniel
>>>
>>> QEMU send the fd by unix domain socket.unix domain socket just install the 
>>> fd to
>>> other process and inc the f_count,if qemu not close the fd the f_count is 
>>> not dec.
>>> Then the other process even close the fd the hugepage would not freed whise 
>>> the other process exit.
>>
>> The kernel always closes all FDs when a process exits. So if this FD is
>> not being correctly closed then it is a kernel bug. There should never
>> be any reason for an application to do close(fd) before exiting.
>>
>> Regards,
>> Daniel
>>
> Hi,daniel
> 
> I don't think this is kernel's bug.May be this a problem about usage.
> If you open a file you should close it too.

If you don't close it, the kernel will help you when the program exits.

> 
> This is <>about how to free resource of file.
> http://linux.die.net/man/2/close
> 
> 
> I'm trying to describe my problem.
> 
> For example, there are 2 VMs run with hugepage and the hugepage only for QEMU 
> to use.
> 
> Before run VM the meminfo is :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 
> Run the two VMs.QEMU deal with hugepage as follow steps:
> 1.open
> 2.unlink
> 3.mmap
> 4.use memory of hugepage.After this step the meminfo is :
> HugePages_Total:4096
> HugePages_Free:0
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 5.shutdown VM with signal 15 without close(fd).After this step the meminfo is 
> :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 
> Yes,it works well,like you said the kernel recycle all resources.
> 
> For another example,there are 2 VMs run with hugepage and share the hugepage 
> with vapp(a vhost-user application).

The vapp is your internal application?

> 
> Before run VM the meminfo is :
> HugePages_Total:4096
> HugePages_Free: 4096
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 
> Run the first VM.QEMU deal with hugepage as follow steps:
> 1.open
> 2.unlink
> 3.mmap
> 4.use memory of hugepage and send the fd to vapp with unix domain 
> socket.After this step the meminfo is:
> HugePages_Total:4096
> HugePages_Free: 2048
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 
> Run the second VM.After this step the meminfo is:
> HugePages_Total:4096
> HugePages_Free:0
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> 
> Then I want to close the first VM and run another VM.After close the first VM 
> and close the fd in vapp the meminfo is :
> HugePages_Total:4096
> HugePages_Free:0
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB

Does the qemu still run after you close the first VM? If the qemu exits, the fd 
will be closed by the kernel, so this
bug is very strange.

> 
> So failed to run the third VM because the first VM have not free the 
> hugepage.After apply this patch the meminfo is:
> HugePages_Total:4096
> HugePages_Free: 2048
> HugePages_Rsvd:0
> HugePages_Surp:0
> Hugepagesize:   2048 kB
> So i can run the third VM success.
> 
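
For context on the fd-passing mechanics debated throughout this thread, here is
a minimal, hypothetical sketch -- not taken from any of the patches -- of how a
descriptor is handed to another process over an AF_UNIX socket with SCM_RIGHTS.
The kernel installs a separate descriptor in the receiver that refers to the
same open file, so a close() on the sending side only drops the sender's
reference; the unlinked hugetlbfs file is freed only once every descriptor and
mapping referring to it is gone, in QEMU and in the vhost-user backend alike.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Hypothetical helper: pass one fd over a connected AF_UNIX socket. */
static int send_one_fd(int sock, int fd)
{
    char dummy = 'x';                  /* SCM_RIGHTS needs at least 1 data byte */
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));   /* the fd travels as ancillary data */

    return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}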




Re: [Qemu-devel] [PATCH v4 2/3] monitor: add del completion for peripheral device

2014-10-19 Thread Zhu Guihua
On Sun, 2014-10-19 at 14:37 +0300, Marcel Apfelbaum wrote:
> On Fri, 2014-10-17 at 17:35 +0800, Zhu Guihua wrote:
> > Add peripheral_device_del_completion() to let peripheral device del 
> > completion
> > be possible.
> > 
> > Signed-off-by: Zhu Guihua 
> > ---
> >  monitor.c | 24 
> >  1 file changed, 24 insertions(+)
> > 
> > diff --git a/monitor.c b/monitor.c
> > index 2d14f39..9c3fa01 100644
> > --- a/monitor.c
> > +++ b/monitor.c
> > @@ -4359,6 +4359,29 @@ static void device_del_bus_completion(ReadLineState 
> > *rs,  BusState *bus,
> >  }
> >  }
> >  
> > +static void peripheral_device_del_completion(ReadLineState *rs,
> > + const char *str, size_t len)
> > +{
> > +Object *peripheral;
> > +GSList *list = NULL, *item;
> > +
> > +peripheral = object_resolve_path("/machine/peripheral/", NULL);
> > +if (peripheral == NULL) {
> > +return;
> > +}
> > +
> > +object_child_foreach(peripheral, qdev_build_hotpluggable_device_list,
> > + &list);
> > +
> > +for (item = list; item; item = g_slist_next(item)) {
> > +DeviceState *dev = item->data;
> > +
> > +if (dev->id && !strncmp(str, dev->id, len)) {
> > +readline_add_completion(rs, dev->id);
> > +}
> > +
> Hi,
> 
> Am I missing something or g_slist_free(list)
> should be somewhere here?
> 

Yes, you are right, g_slist_free(list) should be here.
I am sorry I forgot to do this. Thanks for the reminder; I will fix it.
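
If it helps, the fix presumably just adds the free after the completion loop,
roughly (a sketch, not the final patch):

    for (item = list; item; item = g_slist_next(item)) {
        DeviceState *dev = item->data;

        if (dev->id && !strncmp(str, dev->id, len)) {
            readline_add_completion(rs, dev->id);
        }
    }
    g_slist_free(list);   /* release the list built by object_child_foreach() */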

Regards,
Zhu

> Thanks,
> Marcel
> 
> 
> 
> > +}
> > +
> >  void chardev_remove_completion(ReadLineState *rs, int nb_args, const char 
> > *str)
> >  {
> >  size_t len;
> > @@ -4432,6 +4455,7 @@ void device_del_completion(ReadLineState *rs, int 
> > nb_args, const char *str)
> >  len = strlen(str);
> >  readline_set_completion_index(rs, len);
> >  device_del_bus_completion(rs, sysbus_get_default(), str, len);
> > +peripheral_device_del_completion(rs, str, len);
> >  }
> >  
> >  void object_del_completion(ReadLineState *rs, int nb_args, const char *str)
> 
> 
> 





Re: [Qemu-devel] [PATCH qom v4 00/13] GPIO/IRQ QOMification: Phase 2 - Getting rid of SYSBUS IRQs

2014-10-19 Thread Peter Crosthwaite
On Sat, Oct 18, 2014 at 2:24 AM, Paolo Bonzini  wrote:
> These are the QOM IRQ patches from Peter Crosthwaite.  I and Alex
> made the small changes I requested, so here they are.
>

Sorry for the list inactivity. I am back in the office as of today.

Regards,
Peter

> We tested them with v3 of the platform bus series.  "-device eTSEC"
> works as expected and qom-test's property retrieval loop works fine with
> an eTSEC platform device added to the machine.
>
> Andreas, if you want I can send a pull request for this.
>
> Paolo
>
> Peter Crosthwaite (13):
>   qdev: gpio: Don't allow name share between I and O
>   qdev: gpio: Register GPIO inputs as child objects
>   qdev: gpio: Register GPIO outputs as QOM links
>   qom: Allow clearing of a Link property
>   qom: Demote already-has-a-parent to a regular error
>   qdev: gpio: Re-implement qdev_connect_gpio QOM style
>   qdev: gpio: Add API for intercepting a GPIO
>   qtest/irq: Rework IRQ interception
>   irq: Remove qemu_irq_intercept_out
>   qdev: gpio: delete NamedGPIOList::out
>   qdev: gpio: Remove qdev_init_gpio_out x1 restriction
>   qdev: gpio: Define qdev_pass_gpios()
>   sysbus: Use TYPE_DEVICE GPIO functionality
>
>  hw/core/irq.c  |  8 +
>  hw/core/qdev.c | 95 
> ++
>  hw/core/sysbus.c   | 20 ++-
>  include/hw/irq.h   |  1 -
>  include/hw/qdev-core.h |  6 +++-
>  include/hw/sysbus.h|  7 ++--
>  qom/object.c   | 16 ++---
>  qtest.c| 15 +---
>  8 files changed, 123 insertions(+), 45 deletions(-)
> --
> 2.1.0
>
>



Re: [Qemu-devel] [Consult] microblaze: About running upstream main branch qemu

2014-10-19 Thread Chen Gang
OK, thanks, I shall try it within 2 days.

Sent from Lenovo A788t.

Guenter Roeck  wrote:

>On 10/19/2014 06:58 AM, Chen Gang wrote:
>>
>> On 10/19/14 21:38, Max Filippov wrote:
>>> On Sun, Oct 19, 2014 at 5:37 PM, Chen Gang  wrote:
 - running:

   ./arm-softmmu/qemu-system-arm -M arm-generic-fdt -nographic -smp 2 
 -machine linux=on -serial mon:stdio -dtb ../linux-xlnx/system.dtb \
 -kernel ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
 ../microblaze_complete.cpio.gz
>>>
>>> qemu-system-microblaze?
>>>
>>
>> OK, thanks.
>>
>> And after correct it, it is still failed:
>>
>>[root@localhost qemu]#./microblazeel-softmmu/qemu-system-microblazeel -M 
>> microblaze-fdt -nographic -smp 1 -machine linux=on -serial mon:stdio -dtb 
>> ../linux-xlnx/system.dtb   -kernel 
>> ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
>> ../microblaze_complete.cpio.gz
>>Segmentation fault (core dumped)
>>
>> Originally, I referenced the link below:
>>
>>http://www.wiki.xilinx.com/QEMU
>>
>
>Try the microblaze and microblazeel subdirectories in
>http://server.roeck-us.net/qemu/.
>
>This doesn't use devicetree, but the configurations are known to be working
>with kernel releases all the way back to kernel version 3.10.
>
>Guenter
>


Re: [Qemu-devel] [PATCH 1/2] i386: Add a Virtual Machine Generation ID device

2014-10-19 Thread Michael S. Tsirkin
On Sun, Oct 19, 2014 at 04:43:07PM +0300, Gal Hammer wrote:
> Based on Microsoft's sepecifications (paper can be dowloaded from
> http://go.microsoft.com/fwlink/?LinkId=260709), add a device
> description to the SSDT ACPI table and its implementation.
> 
> The GUID is set using a global "vmgenid.uuid" parameter.
> 
> Signed-off-by: Gal Hammer 
> 
> ---
>  default-configs/i386-softmmu.mak |   1 +
>  default-configs/x86_64-softmmu.mak   |   1 +
>  hw/acpi/core.c   |   8 +++
>  hw/acpi/ich9.c   |   8 +++
>  hw/acpi/piix4.c  |   8 +++
>  hw/i386/acpi-build.c |   8 +++
>  hw/i386/acpi-dsdt.dsl|   4 +-
>  hw/i386/acpi-dsdt.hex.generated  |   6 +-
>  hw/i386/pc.c |   8 +++
>  hw/i386/q35-acpi-dsdt.dsl|   5 +-
>  hw/i386/q35-acpi-dsdt.hex.generated  |   8 +--
>  hw/i386/ssdt-misc.dsl|  36 +++
>  hw/i386/ssdt-misc.hex.generated  |   8 +--
>  hw/isa/lpc_ich9.c|   1 +
>  hw/misc/Makefile.objs|   1 +
>  hw/misc/vmgenid.c| 116 
> +++
>  include/hw/acpi/acpi.h   |   2 +
>  include/hw/acpi/acpi_dev_interface.h |   4 ++
>  include/hw/acpi/ich9.h   |   2 +
>  include/hw/i386/pc.h |   3 +
>  include/hw/misc/vmgenid.h|  21 +++
>  21 files changed, 246 insertions(+), 13 deletions(-)
>  create mode 100644 hw/misc/vmgenid.c
>  create mode 100644 include/hw/misc/vmgenid.h

Please document the host/guest API.
It seems that you are using a hard-coded hardware address,
and using up a GPE.


> 
> diff --git a/default-configs/i386-softmmu.mak 
> b/default-configs/i386-softmmu.mak
> index 8e08841..bd33c75 100644
> --- a/default-configs/i386-softmmu.mak
> +++ b/default-configs/i386-softmmu.mak
> @@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
>  CONFIG_ICC_BUS=y
>  CONFIG_PVPANIC=y
>  CONFIG_MEM_HOTPLUG=y
> +CONFIG_VMGENID=y
> diff --git a/default-configs/x86_64-softmmu.mak 
> b/default-configs/x86_64-softmmu.mak
> index 66557ac..006fc7c 100644
> --- a/default-configs/x86_64-softmmu.mak
> +++ b/default-configs/x86_64-softmmu.mak
> @@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
>  CONFIG_ICC_BUS=y
>  CONFIG_PVPANIC=y
>  CONFIG_MEM_HOTPLUG=y
> +CONFIG_VMGENID=y
> diff --git a/hw/acpi/core.c b/hw/acpi/core.c
> index a7368fb..a01c980 100644
> --- a/hw/acpi/core.c
> +++ b/hw/acpi/core.c
> @@ -28,6 +28,8 @@
>  #include "qapi-visit.h"
>  #include "qapi-event.h"
>  
> +#define ACPI_VM_GENERATION_ID_CHANGED_STATUS 1
> +
>  struct acpi_table_header {
>  uint16_t _length; /* our length, not actual part of the hdr */
>/* allows easier parsing for fw_cfg clients */
> @@ -680,3 +682,9 @@ void acpi_update_sci(ACPIREGS *regs, qemu_irq irq)
> (regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) &&
> !(pm1a_sts & ACPI_BITMASK_TIMER_STATUS));
>  }
> +
> +void acpi_vm_generation_id_changed(ACPIREGS *acpi_regs, qemu_irq irq)
> +{
> +acpi_regs->gpe.sts[0] |= ACPI_VM_GENERATION_ID_CHANGED_STATUS;
> +acpi_update_sci(acpi_regs, irq);
> +}
> diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
> index 7b14bbb..5501c0e 100644
> --- a/hw/acpi/ich9.c
> +++ b/hw/acpi/ich9.c
> @@ -316,3 +316,11 @@ void ich9_pm_ospm_status(AcpiDeviceIf *adev, 
> ACPIOSTInfoList ***list)
>  
>  acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list);
>  }
> +
> +void ich9_vm_generation_id_changed(AcpiDeviceIf *adev)
> +{
> +ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
> +ICH9LPCPMRegs *pm = &s->pm;
> +
> +acpi_vm_generation_id_changed(&pm->acpi_regs, pm->irq);
> +}
> diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
> index 0bfa814..ad0ef68 100644
> --- a/hw/acpi/piix4.c
> +++ b/hw/acpi/piix4.c
> @@ -580,6 +580,13 @@ static void piix4_ospm_status(AcpiDeviceIf *adev, 
> ACPIOSTInfoList ***list)
>  acpi_memory_ospm_status(&s->acpi_memory_hotplug, list);
>  }
>  
> +static void piix4_vm_generation_id_changed(AcpiDeviceIf *adev)
> +{
> +PIIX4PMState *s = PIIX4_PM(adev);
> +
> +acpi_vm_generation_id_changed(&s->ar, s->irq);
> +}
> +
>  static Property piix4_pm_properties[] = {
>  DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0),
>  DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0),
> @@ -617,6 +624,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void 
> *data)
>  hc->plug = piix4_device_plug_cb;
>  hc->unplug_request = piix4_device_unplug_request_cb;
>  adevc->ospm_status = piix4_ospm_status;
> +adevc->vm_generation_id_changed = piix4_vm_generation_id_changed;
>  }
>  
>  static const TypeInfo piix4_pm_info = {
> diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
> index 00be4bb..27d0494 100644
> --- a/hw/i386/acpi-build.c
> +++ b/hw/i386/acpi-build.c
> @@ -42,6 +42,7 @@
>  #include "hw/acpi/memory_hotplug.h"
>  #include "sysemu/tp

Re: [Qemu-devel] [Consult] microblaze: About running upstream main branch qemu

2014-10-19 Thread Guenter Roeck

On 10/19/2014 06:58 AM, Chen Gang wrote:


On 10/19/14 21:38, Max Filippov wrote:

On Sun, Oct 19, 2014 at 5:37 PM, Chen Gang  wrote:

- running:

  ./arm-softmmu/qemu-system-arm -M arm-generic-fdt -nographic -smp 2 
-machine linux=on -serial mon:stdio -dtb ../linux-xlnx/system.dtb \
-kernel ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
../microblaze_complete.cpio.gz


qemu-system-microblaze?



OK, thanks.

And after correct it, it is still failed:

   [root@localhost qemu]#./microblazeel-softmmu/qemu-system-microblazeel -M 
microblaze-fdt -nographic -smp 1 -machine linux=on -serial mon:stdio -dtb 
../linux-xlnx/system.dtb   -kernel 
../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
../microblaze_complete.cpio.gz
   Segmentation fault (core dumped)

Originally, I referenced the link below:

   http://www.wiki.xilinx.com/QEMU



Try the microblaze and microblazeel subdirectories in
http://server.roeck-us.net/qemu/.

This doesn't use devicetree, but the configurations are known to be working
with kernel releases all the way back to kernel version 3.10.

Guenter




Re: [Qemu-devel] [Consult] microblaze: About running upstream main branch qemu

2014-10-19 Thread Chen Gang

On 10/19/14 21:38, Max Filippov wrote:
> On Sun, Oct 19, 2014 at 5:37 PM, Chen Gang  wrote:
>>- running:
>>
>>  ./arm-softmmu/qemu-system-arm -M arm-generic-fdt -nographic -smp 2 
>> -machine linux=on -serial mon:stdio -dtb ../linux-xlnx/system.dtb \
>>-kernel ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
>> ../microblaze_complete.cpio.gz
> 
> qemu-system-microblaze?
> 

OK, thanks.

And after correcting it, it still fails:

  [root@localhost qemu]#./microblazeel-softmmu/qemu-system-microblazeel -M 
microblaze-fdt -nographic -smp 1 -machine linux=on -serial mon:stdio -dtb 
../linux-xlnx/system.dtb   -kernel 
../linux-xlnx/arch/microblaze/boot/linux.bin -initrd 
../microblaze_complete.cpio.gz
  Segmentation fault (core dumped)

Originally, I referenced the link below:

  http://www.wiki.xilinx.com/QEMU



Thanks
-- 
Chen Gang

Open, share, and attitude like air, water, and life which God blessed



[Qemu-devel] [PATCH 2/2] tests: update acpi tables after adding the vmgenid device

2014-10-19 Thread Gal Hammer
---
 tests/acpi-test-data/pc/DSDT  | Bin 2807 -> 2820 bytes
 tests/acpi-test-data/pc/SSDT  | Bin 3065 -> 3239 bytes
 tests/acpi-test-data/q35/DSDT | Bin 7397 -> 7410 bytes
 tests/acpi-test-data/q35/SSDT | Bin 1346 -> 1520 bytes
 4 files changed, 0 insertions(+), 0 deletions(-)

diff --git a/tests/acpi-test-data/pc/DSDT b/tests/acpi-test-data/pc/DSDT
index 
d37ec34454e6f3db5e91b777f94e03be67a5f583..8c86b1e39454f611ee25ababe0ef429956ba7562
 100644
GIT binary patch
delta 65
zcmew^+9Jl~66_Mf!p*?I=(mwef=knhFWx=Cl_TE6(}ma3Il$Avz`%?_L^$5nz<{AG
UMlU|tDL%~C-IJ?fvK`k>0P4aI+5i9m

delta 52
zcmZn>`!34m66_N4or{5iv0)>Z1ec5}Z@hbeD@VMCrwgy6bAYFTfq@x=2wS|5fdRv0
HZ?2sHWr7Ua

diff --git a/tests/acpi-test-data/pc/SSDT b/tests/acpi-test-data/pc/SSDT
index 
eb2d8b698ce6a3a910a05244a3b6cf80bf818fb9..1f72e5736d825ffbf0bc5d55360f4bfe27cc198b
 100644
GIT binary patch
delta 199
zcmewr}*e57p^d0cTbLZ4^J1~Kv&;T0|Nsi
z299`VAUDi6-aR!h-Z{TCuOzhyDCpwihani|?&%0LmVqI<0myV=0J87@;}QUx5ELvZ
z$i>&j36i}3PXI_(GKfgT2ZuN^6zGApB`jc@oWQVzVIj*BE`AXiM;DhMjshVjri22J
WY)1kELjgla0wcqcgw08e)42erhcprZ

delta 24
fcmZ23`BR)LIM^lRCpQBFWA{d`NXE^AOw+gkVtNN8

diff --git a/tests/acpi-test-data/q35/DSDT b/tests/acpi-test-data/q35/DSDT
index 
2d2bc4adaf54666fe7864e7f39203411b7c308f7..b45f6065ad6140bd987550cc22ff38b09243188f
 100644
GIT binary patch
delta 65
zcmaEA`N@*YCDm1_lgm
UF?#XAPVr&B?w(u?ljCLD0C3V0KL7v#

delta 52
zcmexl`P7ojCD_A0|SQ1
HIWlbke7y}n

diff --git a/tests/acpi-test-data/q35/SSDT b/tests/acpi-test-data/q35/SSDT
index 
778b79bf428b5d7602b7b80c9434e38c79718bb2..5ca654bd91354a7ff023b0a380015612d53f5dd6
 100644
GIT binary patch
delta 199
zcmX@a^?{o!IM^lR11kdq@*!KyH|CynAY1ymNkOUP)>ZP|(H24?{4_-P4hW0SKZSfJ_$#Ap8D5E&-qkLBWE8
zTzqYuAj$jx1b}2EgNQ_YaEK#AfgVU(!UDF*2@FdZ7P2hi;un!|ba4sdC=g;|N+

[Qemu-devel] [PATCH 1/2] i386: Add a Virtual Machine Generation ID device

2014-10-19 Thread Gal Hammer
Based on Microsoft's specifications (the paper can be downloaded from
http://go.microsoft.com/fwlink/?LinkId=260709), add a device
description to the SSDT ACPI table and its implementation.

The GUID is set using a global "vmgenid.uuid" parameter.

Signed-off-by: Gal Hammer 

---
 default-configs/i386-softmmu.mak |   1 +
 default-configs/x86_64-softmmu.mak   |   1 +
 hw/acpi/core.c   |   8 +++
 hw/acpi/ich9.c   |   8 +++
 hw/acpi/piix4.c  |   8 +++
 hw/i386/acpi-build.c |   8 +++
 hw/i386/acpi-dsdt.dsl|   4 +-
 hw/i386/acpi-dsdt.hex.generated  |   6 +-
 hw/i386/pc.c |   8 +++
 hw/i386/q35-acpi-dsdt.dsl|   5 +-
 hw/i386/q35-acpi-dsdt.hex.generated  |   8 +--
 hw/i386/ssdt-misc.dsl|  36 +++
 hw/i386/ssdt-misc.hex.generated  |   8 +--
 hw/isa/lpc_ich9.c|   1 +
 hw/misc/Makefile.objs|   1 +
 hw/misc/vmgenid.c| 116 +++
 include/hw/acpi/acpi.h   |   2 +
 include/hw/acpi/acpi_dev_interface.h |   4 ++
 include/hw/acpi/ich9.h   |   2 +
 include/hw/i386/pc.h |   3 +
 include/hw/misc/vmgenid.h|  21 +++
 21 files changed, 246 insertions(+), 13 deletions(-)
 create mode 100644 hw/misc/vmgenid.c
 create mode 100644 include/hw/misc/vmgenid.h

diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak
index 8e08841..bd33c75 100644
--- a/default-configs/i386-softmmu.mak
+++ b/default-configs/i386-softmmu.mak
@@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
 CONFIG_ICC_BUS=y
 CONFIG_PVPANIC=y
 CONFIG_MEM_HOTPLUG=y
+CONFIG_VMGENID=y
diff --git a/default-configs/x86_64-softmmu.mak 
b/default-configs/x86_64-softmmu.mak
index 66557ac..006fc7c 100644
--- a/default-configs/x86_64-softmmu.mak
+++ b/default-configs/x86_64-softmmu.mak
@@ -45,3 +45,4 @@ CONFIG_IOAPIC=y
 CONFIG_ICC_BUS=y
 CONFIG_PVPANIC=y
 CONFIG_MEM_HOTPLUG=y
+CONFIG_VMGENID=y
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index a7368fb..a01c980 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -28,6 +28,8 @@
 #include "qapi-visit.h"
 #include "qapi-event.h"
 
+#define ACPI_VM_GENERATION_ID_CHANGED_STATUS 1
+
 struct acpi_table_header {
 uint16_t _length; /* our length, not actual part of the hdr */
   /* allows easier parsing for fw_cfg clients */
@@ -680,3 +682,9 @@ void acpi_update_sci(ACPIREGS *regs, qemu_irq irq)
(regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) &&
!(pm1a_sts & ACPI_BITMASK_TIMER_STATUS));
 }
+
+void acpi_vm_generation_id_changed(ACPIREGS *acpi_regs, qemu_irq irq)
+{
+acpi_regs->gpe.sts[0] |= ACPI_VM_GENERATION_ID_CHANGED_STATUS;
+acpi_update_sci(acpi_regs, irq);
+}
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 7b14bbb..5501c0e 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -316,3 +316,11 @@ void ich9_pm_ospm_status(AcpiDeviceIf *adev, 
ACPIOSTInfoList ***list)
 
 acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list);
 }
+
+void ich9_vm_generation_id_changed(AcpiDeviceIf *adev)
+{
+ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
+ICH9LPCPMRegs *pm = &s->pm;
+
+acpi_vm_generation_id_changed(&pm->acpi_regs, pm->irq);
+}
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 0bfa814..ad0ef68 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -580,6 +580,13 @@ static void piix4_ospm_status(AcpiDeviceIf *adev, 
ACPIOSTInfoList ***list)
 acpi_memory_ospm_status(&s->acpi_memory_hotplug, list);
 }
 
+static void piix4_vm_generation_id_changed(AcpiDeviceIf *adev)
+{
+PIIX4PMState *s = PIIX4_PM(adev);
+
+acpi_vm_generation_id_changed(&s->ar, s->irq);
+}
+
 static Property piix4_pm_properties[] = {
 DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0),
 DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0),
@@ -617,6 +624,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data)
 hc->plug = piix4_device_plug_cb;
 hc->unplug_request = piix4_device_unplug_request_cb;
 adevc->ospm_status = piix4_ospm_status;
+adevc->vm_generation_id_changed = piix4_vm_generation_id_changed;
 }
 
 static const TypeInfo piix4_pm_info = {
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 00be4bb..27d0494 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -42,6 +42,7 @@
 #include "hw/acpi/memory_hotplug.h"
 #include "sysemu/tpm.h"
 #include "hw/acpi/tpm.h"
+#include "hw/misc/vmgenid.h"
 
 /* Supported chipsets: */
 #include "hw/acpi/piix4.h"
@@ -96,6 +97,7 @@ typedef struct AcpiMiscInfo {
 const unsigned char *dsdt_code;
 unsigned dsdt_size;
 uint16_t pvpanic_port;
+bool vm_generation_id_set;
 } AcpiMiscInfo;
 
 typedef struct AcpiBuildPciBusHotplugState {
@@ -216,6 +218,7 @@ static void acpi_get_misc_info(AcpiMiscInfo *info)
 info->has_h

[Qemu-devel] [PATCH V6 0/2] Virtual Machine Generation ID

2014-10-19 Thread Gal Hammer
Hi,

A two-part patch series to add QEMU support for Microsoft's Virtual Machine
Generation ID device.

The first patch contains the ACPI table changes and the actual device; the
second patch updates the tests' ACPI tables.

Your comments are welcome.

Thanks,

Gal.

V6 - Move the device's description back to the static SSDT table.
   - The GUID is stored at a "hard-coded" physical address and not
     in the ACPI table itself.
   - ACPI notification is triggered when the GUID is changed
     (a minimal sketch of this update path follows the changelog below).

V5 - include the pre-compiled ASL file
   - remove an empty line at end of files.

V4 - Move device's description to SSDT table (dynamic).

V3 - Fix a typo in error message string.
   - Move device's description from DSDT back to SSDT table.

V2 - Remove "-uuid" command line parameter.
   - Move device's description from SSDT to DSDT table.
   - Add new "vmgenid" sysbus device.
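As referenced in the V6 notes above, a minimal sketch of that update path:
rewrite the GUID at the fixed guest-physical address, then use the
AcpiDeviceIf callback added in patch 1/2 to raise the GPE so the guest is
notified. The address constant, the state fields and the assumption that the
device keeps a pointer to the ACPI device are placeholders for the sketch,
not this series' actual code:

    #include <string.h>                       /* memcpy()                   */
    #include "exec/cpu-common.h"              /* cpu_physical_memory_write()*/
    #include "hw/acpi/acpi_dev_interface.h"   /* AcpiDeviceIfClass          */

    /* Sketch only: VMGENID_GUID_PADDR and VmGenIdState are placeholders. */
    static void vmgenid_update_guid(VmGenIdState *s, const uint8_t new_guid[16])
    {
        memcpy(s->guid, new_guid, 16);

        /* make the new value visible at the hard-coded guest physical address */
        cpu_physical_memory_write(VMGENID_GUID_PADDR, s->guid, sizeof(s->guid));

        /* raise the GPE so the guest is notified about the change
         * (this ends up in acpi_vm_generation_id_changed() from patch 1/2) */
        if (s->acpi_dev) {
            AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(s->acpi_dev);
            adevc->vm_generation_id_changed(s->acpi_dev);
        }
    }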


Gal Hammer (2):
  i386: Add a Virtual Machine Generation ID device
  tests: update acpi tables after adding the vmgenid device

 default-configs/i386-softmmu.mak |   1 +
 default-configs/x86_64-softmmu.mak   |   1 +
 hw/acpi/core.c   |   8 +++
 hw/acpi/ich9.c   |   8 +++
 hw/acpi/piix4.c  |   8 +++
 hw/i386/acpi-build.c |   8 +++
 hw/i386/acpi-dsdt.dsl|   4 +-
 hw/i386/acpi-dsdt.hex.generated  |   6 +-
 hw/i386/pc.c |   8 +++
 hw/i386/q35-acpi-dsdt.dsl|   5 +-
 hw/i386/q35-acpi-dsdt.hex.generated  |   8 +--
 hw/i386/ssdt-misc.dsl|  36 +++
 hw/i386/ssdt-misc.hex.generated  |   8 +--
 hw/isa/lpc_ich9.c|   1 +
 hw/misc/Makefile.objs|   1 +
 hw/misc/vmgenid.c| 116 +++
 include/hw/acpi/acpi.h   |   2 +
 include/hw/acpi/acpi_dev_interface.h |   4 ++
 include/hw/acpi/ich9.h   |   2 +
 include/hw/i386/pc.h |   3 +
 include/hw/misc/vmgenid.h|  21 +++
 tests/acpi-test-data/pc/DSDT | Bin 2807 -> 2820 bytes
 tests/acpi-test-data/pc/SSDT | Bin 3065 -> 3239 bytes
 tests/acpi-test-data/q35/DSDT| Bin 7397 -> 7410 bytes
 tests/acpi-test-data/q35/SSDT| Bin 1346 -> 1520 bytes
 25 files changed, 246 insertions(+), 13 deletions(-)
 create mode 100644 hw/misc/vmgenid.c
 create mode 100644 include/hw/misc/vmgenid.h

-- 
1.9.3




Re: [Qemu-devel] [Consult] microblaze: About running upstream main branch qemu

2014-10-19 Thread Max Filippov
On Sun, Oct 19, 2014 at 5:37 PM, Chen Gang  wrote:
>- running:
>
>  ./arm-softmmu/qemu-system-arm -M arm-generic-fdt -nographic -smp 2 -machine linux=on -serial mon:stdio -dtb ../linux-xlnx/system.dtb \
>    -kernel ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd ../microblaze_complete.cpio.gz

qemu-system-microblaze?

-- 
Thanks.
-- Max



[Qemu-devel] [Consult] microblaze: About running upstream main branch qemu

2014-10-19 Thread Chen Gang
Hello microblaze maintainers:

I tried to run upstream qemu for microblaze, but failed. It seems that only
a special qemu branch can run it. After trying the Xilinx qemu branch, I got
a "segmentation fault" which is related to "-dtb". So I want to consult:

 - Can our main upstream qemu (not other branches) run microblaze?

 - Is what I have done for the Xilinx-related qemu correct?

The related operations for the Xilinx-related qemu are below:

 * For kernel:

   - building:

 git clone https://github.com/Xilinx/linux-xlnx.git
 cd linux-xlnx
  make ARCH=microblaze CROSS_COMPILE=/upstream/release/bin/microblaze-gchen-linux- mmu_config
  make ARCH=microblaze CROSS_COMPILE=/upstream/release/bin/microblaze-gchen-linux-

   - output:

 arch/microblaze/boot/linux.bin
 scripts/dtc/dtc

   - get 'system.dtb':

  ./scripts/dtc/dtc -I dts -O dtb -o system.dtb arch/microblaze/boot/dts/system.dts

 * For ramdisk:

   wget http://www.wiki.xilinx.com/file/view/microblaze_complete.cpio.gz/419243588/microblaze_complete.cpio.gz

 * For qemu:

   - building:

 git clone git://github.com/Xilinx/qemu.git
  ./configure --target-list="arm-softmmu,microblazeel-softmmu" --enable-fdt --disable-kvm
 make

   - running:

  ./arm-softmmu/qemu-system-arm -M arm-generic-fdt -nographic -smp 2 -machine linux=on -serial mon:stdio -dtb ../linux-xlnx/system.dtb \
    -kernel ../linux-xlnx/arch/microblaze/boot/linux.bin -initrd ../microblaze_complete.cpio.gz

   - result:

  Segmentation fault (core dumped) (after a quick try, I am sure it is caused by the "dtb").

Excuse me, but at present I have to focus only on fixing upstream qemu
issues (I do not have enough time for other qemu branches); please
understand.

Thanks.
-- 
Chen Gang

Open, share, and attitude like air, water, and life which God blessed



[Qemu-devel] [PATCH] target-xtensa: fix build for cores w/o windowed registers

2014-10-19 Thread Max Filippov
Cores without windowed registers don't have window overflow/underflow
vectors. Move these vectors to a separate group defined conditionally.
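
The trick used below is that a macro can conditionally contribute
designated-initializer entries to an array initializer; when the core has no
windowed registers, the macro expands to nothing and those entries simply
disappear. A tiny standalone C illustration of the pattern (not QEMU code):

    /* Standalone demo of a macro that conditionally adds initializer entries. */
    #include <stdio.h>

    #define HAVE_WINDOWED 0   /* flip to 1 to include the extra entries */

    #if HAVE_WINDOWED
    #define WINDOW_ENTRIES [1] = 100, [2] = 200,
    #else
    #define WINDOW_ENTRIES
    #endif

    static const int vectors[8] = {
        [0] = 1,
        WINDOW_ENTRIES      /* expands to nothing when HAVE_WINDOWED is 0 */
        [3] = 300,
    };

    int main(void)
    {
        for (int i = 0; i < 8; i++) {
            printf("vectors[%d] = %d\n", i, vectors[i]);
        }
        return 0;
    }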

Signed-off-by: Max Filippov 
---
 target-xtensa/overlay_tool.h | 31 +++
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/target-xtensa/overlay_tool.h b/target-xtensa/overlay_tool.h
index 5a1353e..6105d4c 100644
--- a/target-xtensa/overlay_tool.h
+++ b/target-xtensa/overlay_tool.h
@@ -108,20 +108,27 @@
 #define XCHAL_WINDOW_UF12_VECOFS0x0140
 #endif
 
+#if XCHAL_HAVE_WINDOWED
+#define WINDOW_VECTORS \
+   [EXC_WINDOW_OVERFLOW4] = XCHAL_WINDOW_OF4_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR, \
+   [EXC_WINDOW_UNDERFLOW4] = XCHAL_WINDOW_UF4_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR, \
+   [EXC_WINDOW_OVERFLOW8] = XCHAL_WINDOW_OF8_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR, \
+   [EXC_WINDOW_UNDERFLOW8] = XCHAL_WINDOW_UF8_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR, \
+   [EXC_WINDOW_OVERFLOW12] = XCHAL_WINDOW_OF12_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR, \
+   [EXC_WINDOW_UNDERFLOW12] = XCHAL_WINDOW_UF12_VECOFS + \
+   XCHAL_WINDOW_VECTORS_VADDR,
+#else
+#define WINDOW_VECTORS
+#endif
+
 #define EXCEPTION_VECTORS { \
 [EXC_RESET] = XCHAL_RESET_VECTOR_VADDR, \
-[EXC_WINDOW_OVERFLOW4] = XCHAL_WINDOW_OF4_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
-[EXC_WINDOW_UNDERFLOW4] = XCHAL_WINDOW_UF4_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
-[EXC_WINDOW_OVERFLOW8] = XCHAL_WINDOW_OF8_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
-[EXC_WINDOW_UNDERFLOW8] = XCHAL_WINDOW_UF8_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
-[EXC_WINDOW_OVERFLOW12] = XCHAL_WINDOW_OF12_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
-[EXC_WINDOW_UNDERFLOW12] = XCHAL_WINDOW_UF12_VECOFS + \
-XCHAL_WINDOW_VECTORS_VADDR, \
+WINDOW_VECTORS \
 [EXC_KERNEL] = XCHAL_KERNEL_VECTOR_VADDR, \
 [EXC_USER] = XCHAL_USER_VECTOR_VADDR, \
 [EXC_DOUBLE] = XCHAL_DOUBLEEXC_VECTOR_VADDR, \
-- 
1.8.1.4




[Qemu-devel] [PATCH] tests: fix acpi tables regeneration script

2014-10-19 Thread Marcel Apfelbaum
Commit 501f28ca9db08e84819b26314525b6369e7704dd
("tests: rename acpi-test to bios-tables-test")
renamed the acpi-test file from acpi-test.c to bios-tables-test.c.
However, the tables regeneration script was not updated
accordingly.

Fix the test name in the rebuild-expected-aml.sh script as well.

Signed-off-by: Marcel Apfelbaum 
---
 tests/acpi-test-data/rebuild-expected-aml.sh | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/acpi-test-data/rebuild-expected-aml.sh b/tests/acpi-test-data/rebuild-expected-aml.sh
index ab98498..e7ce2b3 100755
--- a/tests/acpi-test-data/rebuild-expected-aml.sh
+++ b/tests/acpi-test-data/rebuild-expected-aml.sh
@@ -12,6 +12,7 @@
 # See the COPYING.LIB file in the top-level directory.
 
 qemu=
+acpi_test_path="tests/bios-tables-test"
 
 if [ -e x86_64-softmmu/qemu-system-x86_64 ]; then
 qemu="x86_64-softmmu/qemu-system-x86_64"
@@ -23,13 +24,13 @@ else
 exit 1;
 fi
 
-if [ ! -e "tests/acpi-test" ]; then
-echo "Test: acpi-test is required! Run make check before this script."
+if [ ! -e $acpi_test_path ]; then
+echo "Test: $acpi_test_path is required! Run make check before this 
script."
 echo "Run this script from the build directory."
 exit 1;
 fi
 
-TEST_ACPI_REBUILD_AML=y QTEST_QEMU_BINARY=$qemu tests/acpi-test
+TEST_ACPI_REBUILD_AML=y QTEST_QEMU_BINARY=$qemu $acpi_test_path
 
 echo "The files were rebuilt and can be added to git."
 echo "However, if new files were created, please copy them manually" \
-- 
1.8.3.1




Re: [Qemu-devel] [PATCH v4 2/3] monitor: add del completion for peripheral device

2014-10-19 Thread Marcel Apfelbaum
On Fri, 2014-10-17 at 17:35 +0800, Zhu Guihua wrote:
> Add peripheral_device_del_completion() to let peripheral device del completion
> be possible.
> 
> Signed-off-by: Zhu Guihua 
> ---
>  monitor.c | 24 
>  1 file changed, 24 insertions(+)
> 
> diff --git a/monitor.c b/monitor.c
> index 2d14f39..9c3fa01 100644
> --- a/monitor.c
> +++ b/monitor.c
> @@ -4359,6 +4359,29 @@ static void device_del_bus_completion(ReadLineState *rs,  BusState *bus,
>  }
>  }
>  
> +static void peripheral_device_del_completion(ReadLineState *rs,
> + const char *str, size_t len)
> +{
> +Object *peripheral;
> +GSList *list = NULL, *item;
> +
> +peripheral = object_resolve_path("/machine/peripheral/", NULL);
> +if (peripheral == NULL) {
> +return;
> +}
> +
> +object_child_foreach(peripheral, qdev_build_hotpluggable_device_list,
> + &list);
> +
> +for (item = list; item; item = g_slist_next(item)) {
> +DeviceState *dev = item->data;
> +
> +if (dev->id && !strncmp(str, dev->id, len)) {
> +readline_add_completion(rs, dev->id);
> +}
> +
Hi,

Am I missing something, or should g_slist_free(list)
be somewhere here?
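
Something along these lines after the loop, I think (untested):

    for (item = list; item; item = g_slist_next(item)) {
        DeviceState *dev = item->data;

        if (dev->id && !strncmp(str, dev->id, len)) {
            readline_add_completion(rs, dev->id);
        }
    }
    g_slist_free(list);   /* frees only the list cells, not the devices */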

Thanks,
Marcel



> +}
> +
>  void chardev_remove_completion(ReadLineState *rs, int nb_args, const char *str)
>  {
>  size_t len;
> @@ -4432,6 +4455,7 @@ void device_del_completion(ReadLineState *rs, int nb_args, const char *str)
>  len = strlen(str);
>  readline_set_completion_index(rs, len);
>  device_del_bus_completion(rs, sysbus_get_default(), str, len);
> +peripheral_device_del_completion(rs, str, len);
>  }
>  
>  void object_del_completion(ReadLineState *rs, int nb_args, const char *str)






[Qemu-devel] [PATCH] target-xtensa: add core importing script

2014-10-19 Thread Max Filippov
This script copies configuration and gdb information from the xtensa
configuration overlay archive and registers a new xtensa core.

Signed-off-by: Max Filippov 
---
 target-xtensa/import_core.sh | 53 
 1 file changed, 53 insertions(+)
 create mode 100755 target-xtensa/import_core.sh

diff --git a/target-xtensa/import_core.sh b/target-xtensa/import_core.sh
new file mode 100755
index 000..73791ec
--- /dev/null
+++ b/target-xtensa/import_core.sh
@@ -0,0 +1,53 @@
+#! /bin/bash -e
+
+OVERLAY="$1"
+NAME="$2"
+FREQ=4
+BASE=$(dirname "$0")
+TARGET="$BASE"/core-$NAME
+
+[ $# -ge 2 -a -f "$OVERLAY" ] || { cat < "$TARGET"/gdb-config.c
+NUM_REGS=$(grep XTREG "$TARGET"/gdb-config.c | wc -l)
+
+cat <<EOF > "${TARGET}.c"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-$NAME/core-isa.h"
+#include "overlay_tool.h"
+
+static const XtensaConfig $NAME __attribute__((unused)) = {
+.name = "$NAME",
+.gdb_regmap = {
+.num_regs = $NUM_REGS,
+.reg = {
+#include "core-$NAME/gdb-config.c"
+}
+},
+.clock_freq_khz = $FREQ,
+DEFAULT_SECTIONS
+};
+
+REGISTER_CORE($NAME)
+EOF
+
+grep -q core-${NAME}.o "$BASE"/Makefile.objs || \
+echo "obj-y += core-${NAME}.o" >> "$BASE"/Makefile.objs
-- 
1.8.1.4