[RFC PATCH] qemu-img: convert: introduce compression_level option

2023-12-09 Thread Yifan Zhao
This patch introduces a new parameter to customize the compression level
in qemu-img convert.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1959
Signed-off-by: Yifan Zhao 
---
This patch adds a new compression_level option to qemu-img convert,
allowing the user to specify the compression level to use.
It resolves an existing issue referenced in the commit message.
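
For reference, the intended usage would be along the lines of
`qemu-img convert -O qcow2 -o compression_type=zstd,compression_level=5 input.raw output.qcow2`;
the exact option name and where it ends up (create options vs. the QAPI schema)
is still open, so treat that spelling as illustrative only.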

However, as a newbie to qemu, I'm not quite sure how to pass this new option
from qcow2_co_create to the compression function qcow2_co_do_compress.
Currently, the compression_level is written directly into the header of the
qcow2 file, which I don't think is appropriate.

I would appreciate getting more suggestions on how to implement this better.

 block/qcow2-threads.c| 80 ++--
 block/qcow2.c| 22 -
 block/qcow2.h|  8 +++-
 include/block/block_int-common.h |  1 +
 qapi/block-core.json | 29 ++--
 5 files changed, 108 insertions(+), 32 deletions(-)

diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
index d6071a1eae..bc375384a9 100644
--- a/block/qcow2-threads.c
+++ b/block/qcow2-threads.c
@@ -67,7 +67,27 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, 
void *arg)
  */
 
 typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
+ const void *src, size_t src_size,
+ int compression_level);
+typedef ssize_t (*Qcow2DecompressFunc)(void *dest, size_t dest_size,
  const void *src, size_t src_size);
+
+enum Qcow2CompressFuncType {
+QCOW2_COMPRESS_FUNC,
+QCOW2_DECOMPRESS_FUNC,
+};
+
+typedef struct Qcow2CompressFuncUnion {
+enum Qcow2CompressFuncType type;
+union {
+struct {
+Qcow2CompressFunc f;
+int compression_level;
+} cfunc;
+Qcow2DecompressFunc dfunc;
+} u;
+} Qcow2CompressFuncUnion;
+
 typedef struct Qcow2CompressData {
 void *dest;
 size_t dest_size;
@@ -75,7 +95,7 @@ typedef struct Qcow2CompressData {
 size_t src_size;
 ssize_t ret;
 
-Qcow2CompressFunc func;
+Qcow2CompressFuncUnion func;
 } Qcow2CompressData;
 
 /*
@@ -85,20 +105,26 @@ typedef struct Qcow2CompressData {
  *
  * @dest - destination buffer, @dest_size bytes
  * @src - source buffer, @src_size bytes
+ * @level - compression level
  *
  * Returns: compressed size on success
  *  -ENOMEM destination buffer is not enough to store compressed data
  *  -EIOon any other error
  */
 static ssize_t qcow2_zlib_compress(void *dest, size_t dest_size,
-   const void *src, size_t src_size)
+   const void *src, size_t src_size, int level)
 {
 ssize_t ret;
 z_stream strm;
 
+if (level == DEFAULT_COMPRESSION_LEVEL || 
+level < Z_BEST_SPEED || level > Z_BEST_COMPRESSION) {
+level = Z_DEFAULT_COMPRESSION;
+}
+
 /* best compression, small window, no zlib header */
 memset(&strm, 0, sizeof(strm));
-ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+ret = deflateInit2(&strm, level, Z_DEFLATED,
                    -12, 9, Z_DEFAULT_STRATEGY);
 if (ret != Z_OK) {
 return -EIO;
@@ -180,13 +206,14 @@ static ssize_t qcow2_zlib_decompress(void *dest, size_t dest_size,
  *
  * @dest - destination buffer, @dest_size bytes
  * @src - source buffer, @src_size bytes
+ * @level - compression level
  *
  * Returns: compressed size on success
  *  -ENOMEM destination buffer is not enough to store compressed data
  *  -EIOon any other error
  */
 static ssize_t qcow2_zstd_compress(void *dest, size_t dest_size,
-   const void *src, size_t src_size)
+   const void *src, size_t src_size, int level)
 {
 ssize_t ret;
 size_t zstd_ret;
@@ -200,11 +227,22 @@ static ssize_t qcow2_zstd_compress(void *dest, size_t dest_size,
 .size = src_size,
 .pos = 0
 };
-ZSTD_CCtx *cctx = ZSTD_createCCtx();
+ZSTD_CCtx *cctx;
 
+if (level == DEFAULT_COMPRESSION_LEVEL ||
+level < ZSTD_minCLevel() || level > ZSTD_maxCLevel()) {
+level = ZSTD_CLEVEL_DEFAULT;
+}
+
+cctx = ZSTD_createCCtx();
 if (!cctx) {
 return -EIO;
 }
+
+zstd_ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
+if (ZSTD_isError(zstd_ret)) {
+ZSTD_freeCCtx(cctx);
+return -EIO;
+}
 /*
  * Use the zstd streamed interface for symmetry with decompression,
  * where streaming is essential since we don't record the exact
@@ -329,15 +367,22 @@ static int qcow2_compress_pool_func(void *opaque)
 {
 Qcow2CompressData *data = opaque;
 
-data->ret = data->func(data->dest, data->dest_size,
-   data->src, data->src_size);
+if (data->
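
(The rest of the patch is cut off here in the archive.) As a minimal,
self-contained sketch of how the worker dispatch on the new
Qcow2CompressFuncUnion might look, an illustration derived from the struct
defined earlier in this hunk rather than the author's actual code:

```
#include <stddef.h>
#include <sys/types.h>   /* ssize_t */

typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
                                     const void *src, size_t src_size,
                                     int compression_level);
typedef ssize_t (*Qcow2DecompressFunc)(void *dest, size_t dest_size,
                                       const void *src, size_t src_size);

enum Qcow2CompressFuncType {
    QCOW2_COMPRESS_FUNC,
    QCOW2_DECOMPRESS_FUNC,
};

typedef struct Qcow2CompressFuncUnion {
    enum Qcow2CompressFuncType type;
    union {
        struct {
            Qcow2CompressFunc f;
            int compression_level;
        } cfunc;
        Qcow2DecompressFunc dfunc;
    } u;
} Qcow2CompressFuncUnion;

typedef struct Qcow2CompressData {
    void *dest;
    size_t dest_size;
    const void *src;
    size_t src_size;
    ssize_t ret;
    Qcow2CompressFuncUnion func;
} Qcow2CompressData;

/* Thread-pool worker: pick the call signature based on func.type, so only
 * the compress path needs to carry the compression level. */
static int qcow2_compress_pool_func(void *opaque)
{
    Qcow2CompressData *data = opaque;

    if (data->func.type == QCOW2_COMPRESS_FUNC) {
        data->ret = data->func.u.cfunc.f(data->dest, data->dest_size,
                                         data->src, data->src_size,
                                         data->func.u.cfunc.compression_level);
    } else {
        data->ret = data->func.u.dfunc(data->dest, data->dest_size,
                                       data->src, data->src_size);
    }
    return 0;
}
```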

Re: [PATCH] accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)

2020-02-14 Thread Yifan Lu
What race are you thinking of in my patch? The obvious race I can
think of is benign:

Case 1:
A: does TB flush
B: read tb_flush_count
A: increment tb_flush_count
A: end_exclusive
B: tb_lookup__cpu_state/tb_gen_code
B: start_exclusive
B: read tb_flush_count again (increment seen)
B: retries

Case 2:
B: read tb_flush_count
A: does TB flush
A: increment tb_flush_count
A: end_exclusive
B: tb_lookup__cpu_state/tb_gen_code
B: start_exclusive
B: read tb_flush_count again (increment seen)
B: retries

Case 3:
A: does TB flush
A: increment tb_flush_count
A: end_exclusive
B: read tb_flush_count
B: tb_lookup__cpu_state/tb_gen_code
B: start_exclusive
B: read tb_flush_count again (no increment seen)
B: proceeds

Case 1 is the expected case. In case 2, we thought the TB was stale but it
wasn't, so we simply look it up again with tb_lookup__cpu_state at minimal
extra cost.

Case 3 seems bad because we could read tb_flush_count and find it already
incremented. But if so, that means thread A is at the end of do_tb_flush:
the lookup tables are already cleared and the TCG context is already reset,
so it should be safe for thread B to call tb_lookup__cpu_state or
tb_gen_code.

Yifan

On Fri, Feb 14, 2020 at 3:31 PM Richard Henderson wrote:
>
> On 2/14/20 6:49 AM, Alex Bennée wrote:
> > The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
> > which is invalidated by a tb_flush before we execute it. This doesn't
> > affect the other cpu_exec modes as a tb_flush by its nature can only
> > occur on a quiescent system. The race was described as:
> >
> >   B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
> >   B3. tcg_tb_alloc obtains a new TB
> >
> >   C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
> >   (same TB as B2)
> >
> >   A3. start_exclusive critical section entered
> >   A4. do_tb_flush is called, TB memory freed/re-allocated
> >   A5. end_exclusive exits critical section
> >
> >   B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
> >   B3. tcg_tb_alloc reallocates TB from B2
> >
> >   C4. start_exclusive critical section entered
> >   C5. cpu_tb_exec executes the TB code that was free in A4
> >
> > The simplest fix is to widen the exclusive period to include the TB
> > lookup. As a result we can drop the complication of checking we are in
> > the exclusive region before we end it.
>
> I'm not 100% keen on having the tb_gen_code within the exclusive region.  It
> implies a much larger delay on (at least) the first execution of the atomic
> operation.
>
> But I suppose until recently we had a global lock around code generation, and
> this is only slightly worse.  Plus, it has the advantage of being dead simple,
> and without the races vs tb_ctx.tb_flush_count that exist in Yifan's patch.
>
> Applied to tcg-next.
>
>
> r~



[Bug 1863025] Re: Use-after-free after flush in TCG accelerator

2020-02-14 Thread Yifan
Apologies, the patch got messed up.

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index c01f59c743..7a9e8c94bd 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -238,8 +238,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
 uint32_t flags;
 uint32_t cflags = 1;
 uint32_t cf_mask = cflags & CF_HASH_MASK;
+unsigned flush_count;
 
 if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+retry:
+flush_count = tb_flush_count();
 tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
 if (tb == NULL) {
 mmap_lock();
@@ -248,6 +251,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
 }
 
 start_exclusive();
+/* do_tb_flush() might run and make tb invalid */
+if (flush_count != tb_flush_count()) {
+end_exclusive();
+goto retry;
+}
 
 /* Since we got here, we know that parallel_cpus must be true.  */
 parallel_cpus = false;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 9f48da9472..2fb7da9b51 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2674,3 +2674,8 @@ void tcg_flush_softmmu_tlb(CPUState *cs)
 tlb_flush(cs);
 #endif
 }
+
+unsigned tb_flush_count(void)
+{
+return atomic_read(&tb_ctx.tb_flush_count);
+}
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index d85e610e85..aa3c2d219a 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -579,6 +579,9 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
 /* exec.c */
 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
 
+/* translate-all.c */
+unsigned tb_flush_count(void);
+
 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
   hwaddr *xlat, hwaddr *plen,




[Bug 1863025] Re: Use-after-free after flush in TCG accelerator

2020-02-14 Thread Yifan
I found it just by launching Ubuntu 19.10 live cd with QXL driver. I
will re-test this weekend.

The workaround I had is to check the number of TB flushes and to re-try
obtaining the TB if the number changes. There is a penalty for the case
where the TB cache is flushed, but it should not degrade performance in
most cases. I think obtaining the lock earlier will slow down the VM if
EXCP_ATOMIC is used often.

Of course, I am assuming a TB flush is the only way to cause this bug.

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index d1c2b6ea1fd..d83b578299b 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -250,8 +250,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
 uint32_t flags;
 uint32_t cflags = 1;
 uint32_t cf_mask = cflags & CF_HASH_MASK;
+unsigned flush_count;
 
 if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+retry:
+flush_count = tb_flush_count();
 tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
 if (tb == NULL) {
 mmap_lock();
@@ -260,6 +263,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
 }
 
 start_exclusive();
+/* do_tb_flush() might run and make tb invalid */
+if (flush_count != tb_flush_count()) {
+end_exclusive();
+goto retry;
+}
 
 /* Since we got here, we know that parallel_cpus must be true.  */
 parallel_cpus = false;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 4ed9d0abaf2..ecf7d3b53ff 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2696,6 +2696,11 @@ void tcg_flush_softmmu_tlb(CPUState *cs)
 #endif
 }
 
+unsigned tb_flush_count(void)
+{
+return atomic_read(&tb_ctx.tb_flush_count);
+}
+
 #if defined(CONFIG_NO_RWX)
 void tb_exec_memory_lock(void)
 {
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 5ccc9485812..1bc61fa6d76 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -584,6 +584,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
 
 /* translate-all.c */
+unsigned tb_flush_count(void);
 #if defined(CONFIG_NO_RWX)
 void tb_exec_memory_lock(void);
 bool tb_is_exec(const TranslationBlock *tb);




[Bug 1863025] [NEW] Use-after-free after flush in TCG accelerator

2020-02-12 Thread Yifan
Public bug reported:

I believe I found a UAF in TCG that can lead to a guest VM escape. The
security list informed me "This can not be treated as a security issue."
and to post it here. I am looking at the 4.2.0 source code. The issue
requires a race and I will try to describe it in terms of three
concurrent threads.

Thread A:

A1. qemu_tcg_cpu_thread_fn runs work loop
A2. qemu_wait_io_event => qemu_wait_io_event_common => process_queued_cpu_work
A3. start_exclusive critical section entered
A4. do_tb_flush is called, TB memory freed/re-allocated
A5. end_exclusive exits critical section

Thread B:

B1. qemu_tcg_cpu_thread_fn runs work loop
B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
B3. tcg_tb_alloc obtains a new TB

Thread C:

C1. qemu_tcg_cpu_thread_fn runs work loop
C2. cpu_exec_step_atomic executes
C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
C4. start_exclusive critical section entered
C5. cpu_tb_exec executes the TB code
C6. end_exclusive exits critical section

Consider the following sequence of events:
  B2 => B3 => C3 (same TB as B2) => A3 => A4 (TB freed) => A5 => B2 =>
  B3 (re-allocates TB from B2) => C4 => C5 (freed/reused TB now executing) => C6

In short, because thread C uses the TB in the critical section, there is
no guarantee that the pointer has not been "freed" (rather the memory is
marked as re-usable) and therefore a use-after-free occurs.

Since the TCG generated code can be in the same memory as the TB data
structure, it is possible for an attacker to overwrite the UAF pointer
with code generated from TCG. This can overwrite key pointer values and
could lead to code execution on the host outside of the TCG sandbox.

** Affects: qemu
 Importance: Undecided
 Status: New


[Bug 1863023] [NEW] Deadlock in QXL

2020-02-12 Thread Yifan
Public bug reported:

This is on qemu 4.2.0 OSX host, running fresh Windows 7 with SPICE guest
tools just installed.

Command line: `qemu-system-x86_64 -qmp tcp:localhost:,server,nowait
-smp cpus=2 -boot order=d -m 2048 -soundhw hda -drive
file=hda.img,if=ide,media=disk -spice port=5930,addr=127.0.0.1,disable-
ticketing,image-compression=off,playback-compression=off,streaming-
video=off -vga qxl -device rtl8139,netdev=net0 -netdev user,id=net0`

After the Windows logo, the screen is black. I dump the two vCPU
threads:

```
* thread #16
  * frame #0: 0x7fff523b8ce6 libsystem_kernel.dylib`__psynch_cvwait + 10
frame #1: 0x7fff52467185 libsystem_pthread.dylib`_pthread_cond_wait + 
701
frame #2: 0x000110bf88bd 
qemu-system-x86_64`qemu_cond_wait_impl(cond=0x00011121e8d0, 
mutex=0x00011120ba48, file="cpus-common.c", line=144) at 
qemu-thread-posix.c:173:11 [opt]
frame #3: 0x000110926a59 
qemu-system-x86_64`do_run_on_cpu(cpu=, func=, 
data=, mutex=0x00011120ba48) at cpus-common.c:144:9 [opt]
frame #4: 0x00011080c50a 
qemu-system-x86_64`memory_region_snapshot_and_clear_dirty at memory.c:2595:5 
[opt]
frame #5: 0x00011080c4d7 
qemu-system-x86_64`memory_region_snapshot_and_clear_dirty(mr=, 
addr=0, size=2359296, client=) at memory.c:2107 [opt]
frame #6: 0x000110849fe1 qemu-system-x86_64`vga_update_display 
[inlined] vga_draw_graphic(s=, full_update=0) at vga.c:1661:16 
[opt]
frame #7: 0x00011084996a 
qemu-system-x86_64`vga_update_display(opaque=) at vga.c:1785 [opt]
frame #8: 0x0001109b261d 
qemu-system-x86_64`qxl_hard_reset(d=0x7f84f873, loadvm=0) at 
qxl.c:1285:5 [opt]
frame #9: 0x00011080ac97 
qemu-system-x86_64`memory_region_write_accessor(mr=0x7f84f8741fb0, addr=5, 
value=, size=1, shift=, mask=, 
attrs=MemTxAttrs @ 0x7786d890) at memory.c:483:5 [opt]
frame #10: 0x00011080ab31 
qemu-system-x86_64`memory_region_dispatch_write [inlined] 
access_with_adjusted_size(addr=, value=0x015c6100, 
size=, access_size_min=, 
access_size_max=, access_fn=, mr=, 
attrs=) at memory.c:544:18 [opt]
frame #11: 0x00011080aafd 
qemu-system-x86_64`memory_region_dispatch_write(mr=, 
addr=, data=22831360, op=32644, attrs=MemTxAttrs @ 
0x7786d8c0) at memory.c:1475 [opt]
frame #12: 0x0001107b080d 
qemu-system-x86_64`address_space_stb(as=, addr=, 
val=22831360, attrs=MemTxAttrs @ r12, result=0x) at 
memory_ldst.inc.c:378:13 [opt]
frame #13: 0x000118570230

* thread #18
  * frame #0: 0x7fff523b8ce6 libsystem_kernel.dylib`__psynch_cvwait + 10
frame #1: 0x7fff52467185 libsystem_pthread.dylib`_pthread_cond_wait + 
701
frame #2: 0x000110bf88bd 
qemu-system-x86_64`qemu_cond_wait_impl(cond=0x00011121e860, 
mutex=0x00011121e818, file="cpus-common.c", line=196) at 
qemu-thread-posix.c:173:11 [opt]
frame #3: 0x000110926c44 qemu-system-x86_64`start_exclusive at 
cpus-common.c:196:9 [opt]
frame #4: 0x000110837c35 
qemu-system-x86_64`cpu_exec_step_atomic(cpu=0x7f851829) at 
cpu-exec.c:265:9 [opt]
frame #5: 0x0001107fcf95 
qemu-system-x86_64`qemu_tcg_cpu_thread_fn(arg=0x7f851829) at 
cpus.c:1799:17 [opt]
frame #6: 0x000110bf911e 
qemu-system-x86_64`qemu_thread_start(args=) at 
qemu-thread-posix.c:519:9 [opt]
frame #7: 0x7fff52466e65 libsystem_pthread.dylib`_pthread_start + 148
frame #8: 0x7fff5246283b libsystem_pthread.dylib`thread_start + 15
```

Seems like thread #16 had a STB to a QXL MMIO register which caused it to
call `qxl_hard_reset` and eventually made its way to `do_run_on_cpu`,
which waits on `qemu_work_cond`. The only way `qemu_work_cond` gets
signalled is if one of the two vCPUs executes the queued work at the end of
its TCG execution. Thread #16 is stuck waiting, so what about thread #18?
Thread #18 is waiting on `exclusive_cond`, which is only signalled once all
the other CPUs have stopped running (but thread #16 is still waiting). So
it's a classic deadlock.
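
For illustration only, a schematic reduction of that circular wait in plain
pthreads; the names work_cond/exclusive_cond/running_cpus mirror the QEMU
concepts, but none of this is QEMU's actual code:

```
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cond = PTHREAD_COND_INITIALIZER;       /* qemu_work_cond   */
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;  /* exclusive_cond   */
static bool work_done;       /* set only when a vCPU drains its work queue  */
static int running_cpus = 2; /* vCPUs still considered "running"            */

/* Like thread #16: MMIO write -> qxl_hard_reset -> do_run_on_cpu():
 * queue work for the other vCPU, then block until it has been executed. */
static void *cpu_a(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!work_done) {
        /* never signalled, because cpu_b is itself blocked below */
        pthread_cond_wait(&work_cond, &lock);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

/* Like thread #18: start_exclusive() waits for every other vCPU to stop. */
static void *cpu_b(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    running_cpus--;                 /* this vCPU stops running */
    while (running_cpus > 0) {
        /* waits for cpu_a, which never stops: it is stuck in cpu_a() above */
        pthread_cond_wait(&exclusive_cond, &lock);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, cpu_a, NULL);
    pthread_create(&b, NULL, cpu_b, NULL);
    pthread_join(a, NULL);   /* hangs: each thread waits on the other */
    pthread_join(b, NULL);
    return 0;
}
```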

** Affects: qemu
 Importance: Undecided
 Status: New


[Qemu-devel] [Bug 1679358] Re: ARM: SCTLR fields not being preserved

2017-04-04 Thread Yifan
So there won't be a fix in the future? I'm working on debugging a
proprietary bootloader that I do not have the source code for. I wonder
whether this becomes an issue for any other platform targets.




[Qemu-devel] [Bug 1679358] [NEW] ARM: SCTLR fields not being preserved

2017-04-03 Thread Yifan
Public bug reported:

There are fields in SCTLR that are RAO/SBOP or WI, or, in the case of the
RR field, accessible only in secure mode. Currently it seems that qemu
just propagates any write to SCTLR straight through to the register, and
this tripped up a bootloader that I am debugging.
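
For illustration, a minimal sketch of the kind of write filtering I would
expect; this is not QEMU's actual SCTLR handling, the bit masks are
placeholders, and banked/secure-only fields like RR would need extra state:

```
#include <stdint.h>

#define SCTLR_RAO_MASK  UINT32_C(0x00000000)  /* placeholder: bits that read as one     */
#define SCTLR_WI_MASK   UINT32_C(0x00000000)  /* placeholder: bits whose writes ignore  */

/* Combine the guest's new value with the old register contents so that
 * WI bits keep their previous value and RAO/SBOP bits stay set. */
static uint32_t sctlr_apply_write(uint32_t old, uint32_t value)
{
    value &= ~SCTLR_WI_MASK;        /* drop writes to write-ignored bits       */
    value |=  old & SCTLR_WI_MASK;  /* keep whatever was there before          */
    value |=  SCTLR_RAO_MASK;       /* RAO/SBOP bits are forced back to one    */
    return value;
}
```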

** Affects: qemu
 Importance: Undecided
 Status: New
