path: root/meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch
Diffstat (limited to 'meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch')
-rw-r--r--  meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch  113
1 file changed, 113 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch b/meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch
new file mode 100644
index 0000000000..8329f2cfd0
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0017-fix-CVE-2018-20126.patch
@@ -0,0 +1,113 @@
+CVE: CVE-2018-20126
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=509f57c]
+
+Backport and rebase patch to fix CVE-2018-20126.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+From 509f57c98e7536905bb4902363d0cba66ce7e089 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Thu, 13 Dec 2018 01:00:37 +0530
+Subject: [PATCH] pvrdma: release ring object in case of an error
+
+create_cq and create_qp routines allocate ring object, but it's
+not released in case of an error, leading to memory leakage.
+
+Reported-by: Li Qiang <liq3ea@163.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
+Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+---
+ hw/rdma/vmw/pvrdma_cmd.c | 41 ++++++++++++++++++++++++++++++-----------
+ 1 file changed, 30 insertions(+), 11 deletions(-)
+
+diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
+index 4faeb21..9b6796f 100644
+--- a/hw/rdma/vmw/pvrdma_cmd.c
++++ b/hw/rdma/vmw/pvrdma_cmd.c
+@@ -310,6 +310,14 @@ out:
+ return rc;
+ }
+
++static void destroy_cq_ring(PvrdmaRing *ring)
++{
++ pvrdma_ring_free(ring);
++ /* ring_state was in slot 1, not 0 so need to jump back */
++ rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
++ g_free(ring);
++}
++
+ static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+ {
+@@ -333,6 +341,10 @@ static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+
+ resp->hdr.err = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev,
+ cmd->cqe, &resp->cq_handle, ring);
++ if (resp->hdr.err) {
++ destroy_cq_ring(ring);
++ }
++
+ resp->cqe = cmd->cqe;
+
+ out:
+@@ -356,10 +368,7 @@ static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ }
+
+ ring = (PvrdmaRing *)cq->opaque;
+- pvrdma_ring_free(ring);
+- /* ring_state was in slot 1, not 0 so need to jump back */
+- rdma_pci_dma_unmap(PCI_DEVICE(dev), --ring->ring_state, TARGET_PAGE_SIZE);
+- g_free(ring);
++ destroy_cq_ring(ring);
+
+ rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);
+
+@@ -451,6 +460,17 @@ out:
+ return rc;
+ }
+
++static void destroy_qp_rings(PvrdmaRing *ring)
++{
++ pr_dbg("sring=%p\n", &ring[0]);
++ pvrdma_ring_free(&ring[0]);
++ pr_dbg("rring=%p\n", &ring[1]);
++ pvrdma_ring_free(&ring[1]);
++
++ rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
++ g_free(ring);
++}
++
+ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+ {
+@@ -482,6 +502,11 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ cmd->max_recv_wr, cmd->max_recv_sge,
+ cmd->recv_cq_handle, rings, &resp->qpn);
+
++ if (resp->hdr.err) {
++ destroy_qp_rings(rings);
++ return resp->hdr.err;
++ }
++
+ resp->max_send_wr = cmd->max_send_wr;
+ resp->max_recv_wr = cmd->max_recv_wr;
+ resp->max_send_sge = cmd->max_send_sge;
+@@ -555,13 +580,7 @@ static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
+
+ ring = (PvrdmaRing *)qp->opaque;
+- pr_dbg("sring=%p\n", &ring[0]);
+- pvrdma_ring_free(&ring[0]);
+- pr_dbg("rring=%p\n", &ring[1]);
+- pvrdma_ring_free(&ring[1]);
+-
+- rdma_pci_dma_unmap(PCI_DEVICE(dev), ring->ring_state, TARGET_PAGE_SIZE);
+- g_free(ring);
++ destroy_qp_rings(ring);
+
+ return 0;
+ }
+--
+2.20.1
+
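
For context, the commit message above describes the leak pattern the backport closes: create_cq and create_qp allocate a ring object, hand it to the resource manager, and previously returned without freeing it when that registration failed. Below is a minimal, self-contained C sketch of that error-path cleanup pattern. The names (ring_alloc, ring_destroy, register_cq, create_cq_sketch) are hypothetical stand-ins, not the QEMU API; the real code uses pvrdma_ring_init(), rdma_rm_alloc_cq() and the destroy_cq_ring()/destroy_qp_rings() helpers introduced by the patch.

#include <stdlib.h>

/* Hypothetical stand-ins for the ring allocation and registration calls. */
struct ring { void *state; };

static struct ring *ring_alloc(void) { return calloc(1, sizeof(struct ring)); }
static void ring_destroy(struct ring *r) { free(r); }          /* analogue of destroy_cq_ring() */
static int register_cq(struct ring *r) { (void)r; return -1; } /* pretend registration fails */

static int create_cq_sketch(void)
{
    struct ring *ring = ring_alloc();
    if (!ring) {
        return -1;
    }

    int err = register_cq(ring);
    if (err) {
        /* The essence of the CVE-2018-20126 fix: without this cleanup,
         * the ring allocated above is leaked on every failed registration,
         * letting a guest exhaust host memory by repeating the request. */
        ring_destroy(ring);
        return err;
    }

    return 0;
}

int main(void)
{
    return create_cq_sketch() ? 1 : 0;
}

The patch applies the same idea in both object paths: it factors the existing teardown from destroy_cq()/destroy_qp() into destroy_cq_ring() and destroy_qp_rings(), then calls those helpers on the error branches of create_cq() and create_qp().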