On Fri, Feb 04, 2011 at 01:32:58AM +0200, Eduard - Gabriel Munteanu wrote:
> Emulated PCI IDE controllers now use the memory access interface. This
> also allows an emulated IOMMU to translate and check accesses.
> 
> Map invalidation results in cancelling DMA transfers. Since the guest OS
> can't properly recover the DMA results in case the mapping is changed,
> this is a fairly good approximation.
> 
> Note this doesn't handle AHCI emulation yet!
> 
> Signed-off-by: Eduard - Gabriel Munteanu <eduard.munte...@linux360.ro>

How about not changing AHCI at all then, and instead failing
initialization when an IOMMU is present?

> ---
>  dma-helpers.c     |   23 ++++++++++++++++++-----
>  dma.h             |    4 +++-
>  hw/ide/ahci.c     |    3 ++-
>  hw/ide/internal.h |    1 +
>  hw/ide/macio.c    |    4 ++--
>  hw/ide/pci.c      |   18 +++++++++++-------
>  6 files changed, 37 insertions(+), 16 deletions(-)
> 
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 712ed89..29a74a4 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -10,12 +10,13 @@
>  #include "dma.h"
>  #include "block_int.h"
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMADevice *dma)
>  {
>      qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
>      qsg->nsg = 0;
>      qsg->nalloc = alloc_hint;
>      qsg->size = 0;
> +    qsg->dma = dma;
>  }
>  
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
> @@ -73,12 +74,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
>      int i;
>  
>      for (i = 0; i < dbs->iov.niov; ++i) {
> -        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
> -                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
> -                                  dbs->iov.iov[i].iov_len);
> +        dma_memory_unmap(dbs->sg->dma,
> +                         dbs->iov.iov[i].iov_base,
> +                         dbs->iov.iov[i].iov_len, !dbs->is_write,
> +                         dbs->iov.iov[i].iov_len);
>      }
>  }
>  
> +static void dma_bdrv_cancel(void *opaque)
> +{
> +    DMAAIOCB *dbs = opaque;
> +
> +    bdrv_aio_cancel(dbs->acb);
> +    dma_bdrv_unmap(dbs);
> +    qemu_iovec_destroy(&dbs->iov);
> +    qemu_aio_release(dbs);
> +}
> +
>  static void dma_bdrv_cb(void *opaque, int ret)
>  {
>      DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> @@ -100,7 +112,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
>      while (dbs->sg_cur_index < dbs->sg->nsg) {
>          cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
>          cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
> -        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
> +        mem = dma_memory_map(dbs->sg->dma, dma_bdrv_cancel, dbs,
> +                             cur_addr, &cur_len, !dbs->is_write);
>          if (!mem)
>              break;
>          qemu_iovec_add(&dbs->iov, mem, cur_len);
> diff --git a/dma.h b/dma.h
> index f3bb275..2417b32 100644
> --- a/dma.h
> +++ b/dma.h
> @@ -14,6 +14,7 @@
>  //#include "cpu.h"
>  #include "hw/hw.h"
>  #include "block.h"
> +#include "hw/dma_rw.h"
>  
>  typedef struct {
>      target_phys_addr_t base;
> @@ -25,9 +26,10 @@ typedef struct {
>      int nsg;
>      int nalloc;
>      target_phys_addr_t size;
> +    DMADevice *dma;
>  } QEMUSGList;
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMADevice *dma);
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
>                       target_phys_addr_t len);
>  void qemu_sglist_destroy(QEMUSGList *qsg);
> diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
> index 968fdce..aea06a9 100644
> --- a/hw/ide/ahci.c
> +++ b/hw/ide/ahci.c
> @@ -993,7 +993,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, 
> QEMUSGList *sglist)
>      if (sglist_alloc_hint > 0) {
>          AHCI_SG *tbl = (AHCI_SG *)prdt;
>  
> -        qemu_sglist_init(sglist, sglist_alloc_hint);
> +        /* FIXME: pass a proper DMADevice. */
> +        qemu_sglist_init(sglist, sglist_alloc_hint, NULL);
>          for (i = 0; i < sglist_alloc_hint; i++) {
>              /* flags_size is zero-based */
>              qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
> diff --git a/hw/ide/internal.h b/hw/ide/internal.h
> index 697c3b4..3d3d5db 100644
> --- a/hw/ide/internal.h
> +++ b/hw/ide/internal.h
> @@ -468,6 +468,7 @@ struct IDEDMA {
>      struct iovec iov;
>      QEMUIOVector qiov;
>      BlockDriverAIOCB *aiocb;
> +    DMADevice *dev;
>  };
>  
>  struct IDEBus {
> diff --git a/hw/ide/macio.c b/hw/ide/macio.c
> index c1b4caa..654ae7c 100644
> --- a/hw/ide/macio.c
> +++ b/hw/ide/macio.c
> @@ -79,7 +79,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int 
> ret)
>  
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> @@ -141,7 +141,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
>      s->io_buffer_index = 0;
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> diff --git a/hw/ide/pci.c b/hw/ide/pci.c
> index 510b2de..e3432c4 100644
> --- a/hw/ide/pci.c
> +++ b/hw/ide/pci.c
> @@ -64,7 +64,8 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
>      } prd;
>      int l, len;
>  
> -    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
> +    qemu_sglist_init(&s->sg,
> +                     s->nsector / (BMDMA_PAGE_SIZE / 512) + 1, dma->dev);
>      s->io_buffer_size = 0;
>      for(;;) {
>          if (bm->cur_prd_len == 0) {
> @@ -72,7 +73,7 @@ static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
>                  return s->io_buffer_size != 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -114,7 +115,7 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
>                  return 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            dma_memory_read(dma->dev, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -129,11 +130,11 @@ static int bmdma_rw_buf(IDEDMA *dma, int is_write)
>              l = bm->cur_prd_len;
>          if (l > 0) {
>              if (is_write) {
> -                cpu_physical_memory_write(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, 
> l);
> +                dma_memory_write(dma->dev, bm->cur_prd_addr,
> +                                 s->io_buffer + s->io_buffer_index, l);
>              } else {
> -                cpu_physical_memory_read(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, 
> l);
> +                dma_memory_read(dma->dev, bm->cur_prd_addr,
> +                                s->io_buffer + s->io_buffer_index, l);
>              }
>              bm->cur_prd_addr += l;
>              bm->cur_prd_len -= l;
> @@ -444,6 +445,9 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo 
> **hd_table)
>              continue;
>          ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
>      }
> +
> +    d->bmdma[0].dma.dev = &dev->dma;
> +    d->bmdma[1].dma.dev = &dev->dma;
>  }
>  
>  static const struct IDEDMAOps bmdma_ops = {
> -- 
> 1.7.3.4

Reply via email to