Signed-off-by: Sakib Sajal <sakib.sa...@windriver.com>
---
 meta/recipes-devtools/qemu/qemu.inc           |   1 +
 .../qemu/qemu/CVE-2021-3748.patch             | 127 ++++++++++++++++++
 2 files changed, 128 insertions(+)
 create mode 100644 meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 970aa96608..7648ce9a38 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -78,6 +78,7 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
            file://CVE-2021-3595_2.patch \
            file://CVE-2021-3594.patch \
            file://CVE-2021-3713.patch \
+           file://CVE-2021-3748.patch \
            "
 
 UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
new file mode 100644
index 0000000000..4765f24739
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
@@ -0,0 +1,127 @@
+From bacc200f623647632258f7efc0f098ac30dd4225 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasow...@redhat.com>
+Date: Thu, 2 Sep 2021 13:44:12 +0800
+Subject: [PATCH 09/12] virtio-net: fix use after unmap/free for sg
+
+When mergeable buffer is enabled, we try to set the num_buffers after
+the virtqueue elem has been unmapped. This will lead to several issues,
+e.g. a use after free when the descriptor has an address which belongs
+to the non direct access region. In this case we use a bounce buffer
+that is allocated during address_space_map() and freed during
+address_space_unmap().
+
+Fix this by storing the elems temporarily in an array and delaying the
+unmap until after we set the num_buffers.
+
+This addresses CVE-2021-3748.
+
+Reported-by: Alexander Bulekov <alx...@bu.edu>
+Fixes: fbe78f4f55c6 ("virtio-net support")
+Cc: qemu-sta...@nongnu.org
+Signed-off-by: Jason Wang <jasow...@redhat.com>
+
+Upstream-Status: Backport [bedd7e93d01961fcb16a97ae45d93acf357e11f6]
+CVE: CVE-2021-3748
+
+Signed-off-by: Sakib Sajal <sakib.sa...@windriver.com>
+---
+ hw/net/virtio-net.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
+index 9179013ac..df1d30e2c 100644
+--- a/hw/net/virtio-net.c
++++ b/hw/net/virtio-net.c
+@@ -1665,10 +1665,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+     VirtIONet *n = qemu_get_nic_opaque(nc);
+     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
+     VirtIODevice *vdev = VIRTIO_DEVICE(n);
++    VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
++    size_t lens[VIRTQUEUE_MAX_SIZE];
+     struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
+     struct virtio_net_hdr_mrg_rxbuf mhdr;
+     unsigned mhdr_cnt = 0;
+-    size_t offset, i, guest_offset;
++    size_t offset, i, guest_offset, j;
++    ssize_t err;
+ 
+     if (!virtio_net_can_receive(nc)) {
+         return -1;
+@@ -1699,6 +1702,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ 
+         total = 0;
+ 
++        if (i == VIRTQUEUE_MAX_SIZE) {
++            virtio_error(vdev, "virtio-net unexpected long buffer chain");
++            err = size;
++            goto err;
++        }
++
+         elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
+         if (!elem) {
+             if (i) {
+@@ -1710,7 +1719,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                              n->guest_hdr_len, n->host_hdr_len,
+                              vdev->guest_features);
+             }
+-            return -1;
++            err = -1;
++            goto err;
+         }
+ 
+         if (elem->in_num < 1) {
+@@ -1718,7 +1728,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                          "virtio-net receive queue contains no in buffers");
+             virtqueue_detach_element(q->rx_vq, elem, 0);
+             g_free(elem);
+-            return -1;
++            err = -1;
++            goto err;
+         }
+ 
+         sg = elem->in_sg;
+@@ -1755,12 +1766,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+         if (!n->mergeable_rx_bufs && offset < size) {
+             virtqueue_unpop(q->rx_vq, elem, total);
+             g_free(elem);
+-            return size;
++            err = size;
++            goto err;
+         }
+ 
+-        /* signal other side */
+-        virtqueue_fill(q->rx_vq, elem, total, i++);
+-        g_free(elem);
++        elems[i] = elem;
++        lens[i] = total;
++        i++;
+     }
+ 
+     if (mhdr_cnt) {
+@@ -1770,10 +1782,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                      &mhdr.num_buffers, sizeof mhdr.num_buffers);
+     }
+ 
++    for (j = 0; j < i; j++) {
++        /* signal other side */
++        virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
++        g_free(elems[j]);
++    }
++
+     virtqueue_flush(q->rx_vq, i);
+     virtio_notify(vdev, q->rx_vq);
+ 
+     return size;
++
++err:
++    for (j = 0; j < i; j++) {
++        g_free(elems[j]);
++    }
++
++    return err;
+ }
+ 
+ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
+-- 
+2.31.1
+
-- 
2.33.0
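
For reviewers who want the failure mode in miniature: the standalone C
sketch below is not QEMU code; bounce_map()/bounce_unmap() are
hypothetical stand-ins for address_space_map()/address_space_unmap().
It shows the ordering rule the patch restores: every write through a
mapping, such as the deferred num_buffers store, must land before the
unmap that may free a bounce buffer.

/* uaf-sketch.c: standalone illustration, NOT QEMU code.
 * bounce_map()/bounce_unmap() stand in for
 * address_space_map()/address_space_unmap(): for guest addresses
 * outside the direct-access region, map hands back a temporary bounce
 * buffer that unmap frees, so any write through the pointer after
 * unmap is a use-after-free. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *bounce_map(size_t len)
{
    return calloc(1, len);      /* bounce buffer allocated here */
}

static void bounce_unmap(void *buf)
{
    free(buf);                  /* ...and freed here */
}

int main(void)
{
    uint16_t *num_buffers = bounce_map(sizeof(*num_buffers));

    /* Pre-patch order, in miniature:
     *     bounce_unmap(num_buffers);   (virtqueue_fill + g_free)
     *     *num_buffers = 2;            (iov_from_buf writing mhdr)
     * which writes through a freed bounce buffer.
     *
     * Post-patch order, as below: finish all writes through the
     * mapping first, then unmap/free, mirroring how the patch defers
     * virtqueue_fill() until after mhdr.num_buffers is stored. */
    *num_buffers = 2;
    bounce_unmap(num_buffers);

    puts("num_buffers stored before unmap: no use-after-free");
    return 0;
}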