From: Marc-André Lureau <marcandre.lur...@redhat.com>

max_peer isn't really useful: it tracks the highest VM id received so far, but that quickly matches nb_peers, the size of the peers array. Since VMs come and go, the peers array may be sparse anyway, so keeping this value around doesn't help much in general.
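
Not part of the patch, just an illustration of the reasoning above: a minimal, standalone C sketch (simplified structs, hypothetical doorbell() helper, plain printf instead of IVSHMEM_DPRINTF) showing why bounding dest by nb_peers is enough even when peers are sparse: an empty slot simply has no eventfds to notify.

#include <stdio.h>

typedef struct Peer {
    int nb_eventfds;            /* 0 when this slot is currently unused */
} Peer;

typedef struct IVShmemState {
    Peer *peers;
    int nb_peers;               /* how many guests we have space for */
} IVShmemState;

/* Hypothetical helper mirroring the DOORBELL case in ivshmem_io_write(). */
static void doorbell(IVShmemState *s, int dest, int vector)
{
    /* check that dest VM ID is reasonable: the array has nb_peers slots */
    if (dest >= s->nb_peers) {
        printf("Invalid destination VM ID (%d)\n", dest);
        return;
    }
    /* a departed (sparse) peer has no eventfds, so nothing is notified */
    if (vector >= s->peers[dest].nb_eventfds) {
        printf("Invalid vector %d for VM %d\n", vector, dest);
        return;
    }
    printf("notify VM %d, vector %d\n", dest, vector);
}

int main(void)
{
    Peer peers[4] = { { 1 }, { 0 }, { 1 }, { 0 } };   /* slots 1 and 3 empty */
    IVShmemState s = { peers, 4 };

    doorbell(&s, 2, 0);   /* valid peer */
    doorbell(&s, 1, 0);   /* sparse slot: rejected via nb_eventfds */
    doorbell(&s, 7, 0);   /* out of range: rejected via nb_peers */
    return 0;
}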
Signed-off-by: Marc-André Lureau <marcandre.lur...@redhat.com>
---
 hw/misc/ivshmem.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 07f2182..cda7dce 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -90,7 +90,6 @@ typedef struct IVShmemState {
 
     Peer *peers;
     int nb_peers; /* how many guests we have space for */
-    int max_peer; /* maximum numbered peer */
 
     int vm_id;
     uint32_t vectors;
@@ -200,7 +199,7 @@ static void ivshmem_io_write(void *opaque, hwaddr addr,
 
         case DOORBELL:
             /* check that dest VM ID is reasonable */
-            if (dest > s->max_peer) {
+            if (dest >= s->nb_peers) {
                 IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                 break;
             }
@@ -574,11 +573,6 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     /* increment count for particular guest */
     s->peers[incoming_posn].nb_eventfds++;
 
-    /* keep track of the maximum VM ID */
-    if (incoming_posn > s->max_peer) {
-        s->max_peer = incoming_posn;
-    }
-
     if (incoming_posn == s->vm_id) {
         s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
                    &s->peers[s->vm_id].eventfds[guest_max_eventfd],
@@ -721,8 +715,6 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
                       PCI_BASE_ADDRESS_MEM_PREFETCH;;
     Error *local_err = NULL;
 
-    s->max_peer = -1;
-
     if (s->sizearg == NULL) {
         s->ivshmem_size = 4 << 20; /* 4 MB default */
     } else {
-- 
2.4.3