diff --git a/SOURCES/kvm-clean-up-callback-when-del-virtqueue.patch b/SOURCES/kvm-clean-up-callback-when-del-virtqueue.patch new file mode 100644 index 0000000..2dfe279 --- /dev/null +++ b/SOURCES/kvm-clean-up-callback-when-del-virtqueue.patch @@ -0,0 +1,55 @@ +From c17dffff03c6ba03633b8e009599bd3863c638fa Mon Sep 17 00:00:00 2001 +From: Julia Suvorova +Date: Wed, 4 Mar 2020 20:07:51 -0500 +Subject: [PATCH 09/12] clean up callback when del virtqueue + +RH-Author: Julia Suvorova +Message-id: <20200304200754.32708-2-jusual@redhat.com> +Patchwork-id: 94152 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 1/4] clean up callback when del virtqueue +Bugzilla: 1721403 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Michael S. Tsirkin + +From: liujunjie + +Before, we did not clear callback like handle_output when delete +the virtqueue which may result be segmentfault. +The scene is as follows: +1. Start a vm with multiqueue vhost-net, +2. then we write VIRTIO_PCI_GUEST_FEATURES in PCI configuration to +triger multiqueue disable in this vm which will delete the virtqueue. +In this step, the tx_bh is deleted but the callback virtio_net_handle_tx_bh +still exist. +3. Finally, we write VIRTIO_PCI_QUEUE_NOTIFY in PCI configuration to +notify the deleted virtqueue. In this way, virtio_net_handle_tx_bh +will be called and qemu will be crashed. + +Although the way described above is uncommon, we had better reinforce it. + +CC: qemu-stable@nongnu.org +Signed-off-by: liujunjie +Signed-off-by: Jason Wang +(cherry picked from commit 7da2d99fb9fbf30104125c061caaff330e362d74) +Signed-off-by: Jon Maloy +--- + hw/virtio/virtio.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c +index 08a4332210..d228b9297d 100644 +--- a/hw/virtio/virtio.c ++++ b/hw/virtio/virtio.c +@@ -1610,6 +1610,8 @@ void virtio_del_queue(VirtIODevice *vdev, int n) + + vdev->vq[n].vring.num = 0; + vdev->vq[n].vring.num_default = 0; ++ vdev->vq[n].handle_output = NULL; ++ vdev->vq[n].handle_aio_output = NULL; + } + + static void virtio_set_isr(VirtIODevice *vdev, int value) +-- +2.18.2 + diff --git a/SOURCES/kvm-file-posix-Use-max-transfer-length-segment-count-onl.patch b/SOURCES/kvm-file-posix-Use-max-transfer-length-segment-count-onl.patch new file mode 100644 index 0000000..bda93a1 --- /dev/null +++ b/SOURCES/kvm-file-posix-Use-max-transfer-length-segment-count-onl.patch @@ -0,0 +1,157 @@ +From 07dc43478f4d96eb1cd21a40a26262beb1ca2dd6 Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Mon, 27 Apr 2020 08:39:13 +0200 +Subject: [PATCH] file-posix: Use max transfer length/segment count only for + SCSI passthrough + +RH-Author: Maxim Levitsky +Message-id: <20200420155959.20876-2-mlevitsk@redhat.com> +Patchwork-id: 95748 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 1/1] file-posix: Use max transfer length/segment count only for SCSI passthrough +Bugzilla: 1819253 +RH-Acked-by: Stefan Hajnoczi +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Max Reitz + +Regular kernel block devices (/dev/sda*, /dev/nvme*, etc) don't have +max segment size/max segment count hardware requirements exposed +to the userspace, but rather the kernel block layer +takes care to split the incoming requests that +violate these requirements. + +Allowing the kernel to do the splitting allows qemu to avoid +various overheads that arise otherwise from this. + +This is especially visible in nbd server, +exposing as a raw file, a mostly empty qcow2 image over the net. 
+In this case most of the reads by the remote user +won't even hit the underlying kernel block device, +and therefore most of the overhead will be in the +nbd traffic which increases significantly with lower max transfer size. + +In addition to that even for local block device +access the peformance improves a bit due to less +traffic between qemu and the kernel when large +transfer sizes are used (e.g for image conversion) + +More info can be found at: +https://bugzilla.redhat.com/show_bug.cgi?id=1647104 + +Signed-off-by: Maxim Levitsky +Reviewed-by: Stefan Hajnoczi +Reviewed-by: Eric Blake +Reviewed-by: Pankaj Gupta +Signed-off-by: Kevin Wolf +(cherry picked from commit 867eccfed84f96b54f4a432c510a02c2ce03b430) +Signed-off-by: Maxim Levitsky +Signed-off-by: Miroslav Rezanina +--- + block/file-posix.c | 54 ++++++++++++++++++++++++++++-------------------------- + 1 file changed, 28 insertions(+), 26 deletions(-) + +diff --git a/block/file-posix.c b/block/file-posix.c +index 548424d..9e5cd68 100644 +--- a/block/file-posix.c ++++ b/block/file-posix.c +@@ -1008,15 +1008,13 @@ static void raw_reopen_abort(BDRVReopenState *state) + s->reopen_state = NULL; + } + +-static int hdev_get_max_transfer_length(BlockDriverState *bs, int fd) ++static int sg_get_max_transfer_length(int fd) + { + #ifdef BLKSECTGET + int max_bytes = 0; +- short max_sectors = 0; +- if (bs->sg && ioctl(fd, BLKSECTGET, &max_bytes) == 0) { ++ ++ if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) { + return max_bytes; +- } else if (!bs->sg && ioctl(fd, BLKSECTGET, &max_sectors) == 0) { +- return max_sectors << BDRV_SECTOR_BITS; + } else { + return -errno; + } +@@ -1025,25 +1023,31 @@ static int hdev_get_max_transfer_length(BlockDriverState *bs, int fd) + #endif + } + +-static int hdev_get_max_segments(const struct stat *st) ++static int sg_get_max_segments(int fd) + { + #ifdef CONFIG_LINUX + char buf[32]; + const char *end; +- char *sysfspath; ++ char *sysfspath = NULL; + int ret; +- int fd = -1; ++ int sysfd = -1; + long max_segments; ++ struct stat st; ++ ++ if (fstat(fd, &st)) { ++ ret = -errno; ++ goto out; ++ } + + sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", +- major(st->st_rdev), minor(st->st_rdev)); +- fd = open(sysfspath, O_RDONLY); +- if (fd == -1) { ++ major(st.st_rdev), minor(st.st_rdev)); ++ sysfd = open(sysfspath, O_RDONLY); ++ if (sysfd == -1) { + ret = -errno; + goto out; + } + do { +- ret = read(fd, buf, sizeof(buf) - 1); ++ ret = read(sysfd, buf, sizeof(buf) - 1); + } while (ret == -1 && errno == EINTR); + if (ret < 0) { + ret = -errno; +@@ -1060,8 +1064,8 @@ static int hdev_get_max_segments(const struct stat *st) + } + + out: +- if (fd != -1) { +- close(fd); ++ if (sysfd != -1) { ++ close(sysfd); + } + g_free(sysfspath); + return ret; +@@ -1073,19 +1077,17 @@ out: + static void raw_refresh_limits(BlockDriverState *bs, Error **errp) + { + BDRVRawState *s = bs->opaque; +- struct stat st; + +- if (!fstat(s->fd, &st)) { +- if (S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode)) { +- int ret = hdev_get_max_transfer_length(bs, s->fd); +- if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { +- bs->bl.max_transfer = pow2floor(ret); +- } +- ret = hdev_get_max_segments(&st); +- if (ret > 0) { +- bs->bl.max_transfer = MIN(bs->bl.max_transfer, +- ret * getpagesize()); +- } ++ if (bs->sg) { ++ int ret = sg_get_max_transfer_length(s->fd); ++ ++ if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { ++ bs->bl.max_transfer = pow2floor(ret); ++ } ++ ++ ret = sg_get_max_segments(s->fd); ++ if (ret > 0) { ++ bs->bl.max_transfer = 
MIN(bs->bl.max_transfer, ret * getpagesize()); + } + } + +-- +1.8.3.1 + diff --git a/SOURCES/kvm-gluster-Handle-changed-glfs_ftruncate-signature.patch b/SOURCES/kvm-gluster-Handle-changed-glfs_ftruncate-signature.patch new file mode 100644 index 0000000..d765e13 --- /dev/null +++ b/SOURCES/kvm-gluster-Handle-changed-glfs_ftruncate-signature.patch @@ -0,0 +1,94 @@ +From afbc21e30ba77c76a6cce13b95940e32e43213bc Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Mon, 17 Feb 2020 14:52:04 -0500 +Subject: [PATCH 01/12] gluster: Handle changed glfs_ftruncate signature + +RH-Author: Maxim Levitsky +Message-id: <20200217145205.21347-2-mlevitsk@redhat.com> +Patchwork-id: 93894 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 1/2] gluster: Handle changed glfs_ftruncate signature +Bugzilla: 1802216 +RH-Acked-by: Stefan Hajnoczi +RH-Acked-by: Sergio Lopez Pascual +RH-Acked-by: Stefano Garzarella + +From: Prasanna Kumar Kalever + +New versions of Glusters libgfapi.so have an updated glfs_ftruncate() +function that returns additional 'struct stat' structures to enable +advanced caching of attributes. This is useful for file servers, not so +much for QEMU. Nevertheless, the API has changed and needs to be +adopted. + +Signed-off-by: Prasanna Kumar Kalever +Signed-off-by: Niels de Vos +Signed-off-by: Kevin Wolf +(cherry picked from commit e014dbe74e0484188164c61ff6843f8a04a8cb9d) +Signed-off-by: Maxim Levitsky +Signed-off-by: Jon Maloy +--- + block/gluster.c | 4 ++++ + configure | 18 ++++++++++++++++++ + 2 files changed, 22 insertions(+) + +diff --git a/block/gluster.c b/block/gluster.c +index 8c13002fd4..e3ffa6136e 100644 +--- a/block/gluster.c ++++ b/block/gluster.c +@@ -20,6 +20,10 @@ + #include "qemu/option.h" + #include "qemu/cutils.h" + ++#ifdef CONFIG_GLUSTERFS_FTRUNCATE_HAS_STAT ++# define glfs_ftruncate(fd, offset) glfs_ftruncate(fd, offset, NULL, NULL) ++#endif ++ + #define GLUSTER_OPT_FILENAME "filename" + #define GLUSTER_OPT_VOLUME "volume" + #define GLUSTER_OPT_PATH "path" +diff --git a/configure b/configure +index 285fd47de3..f78663367e 100755 +--- a/configure ++++ b/configure +@@ -429,6 +429,7 @@ glusterfs_xlator_opt="no" + glusterfs_discard="no" + glusterfs_fallocate="no" + glusterfs_zerofill="no" ++glusterfs_ftruncate_has_stat="no" + gtk="" + gtkabi="" + gtk_gl="no" +@@ -3871,6 +3872,19 @@ if test "$glusterfs" != "no" ; then + glusterfs_fallocate="yes" + glusterfs_zerofill="yes" + fi ++ cat > $TMPC << EOF ++#include ++ ++int ++main(void) ++{ ++ /* new glfs_ftruncate() passes two additional args */ ++ return glfs_ftruncate(NULL, 0, NULL, NULL); ++} ++EOF ++ if compile_prog "$glusterfs_cflags" "$glusterfs_libs" ; then ++ glusterfs_ftruncate_has_stat="yes" ++ fi + else + if test "$glusterfs" = "yes" ; then + feature_not_found "GlusterFS backend support" \ +@@ -6529,6 +6543,10 @@ if test "$glusterfs_zerofill" = "yes" ; then + echo "CONFIG_GLUSTERFS_ZEROFILL=y" >> $config_host_mak + fi + ++if test "$glusterfs_ftruncate_has_stat" = "yes" ; then ++ echo "CONFIG_GLUSTERFS_FTRUNCATE_HAS_STAT=y" >> $config_host_mak ++fi ++ + if test "$libssh2" = "yes" ; then + echo "CONFIG_LIBSSH2=m" >> $config_host_mak + echo "LIBSSH2_CFLAGS=$libssh2_cflags" >> $config_host_mak +-- +2.18.2 + diff --git a/SOURCES/kvm-gluster-the-glfs_io_cbk-callback-function-pointer-ad.patch b/SOURCES/kvm-gluster-the-glfs_io_cbk-callback-function-pointer-ad.patch new file mode 100644 index 0000000..396c77a --- /dev/null +++ b/SOURCES/kvm-gluster-the-glfs_io_cbk-callback-function-pointer-ad.patch @@ -0,0 +1,105 @@ +From 
d0cf28359d8fee8437d664f98121b8af85a5d12e Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Mon, 17 Feb 2020 14:52:05 -0500 +Subject: [PATCH 02/12] gluster: the glfs_io_cbk callback function pointer adds + pre/post stat args + +RH-Author: Maxim Levitsky +Message-id: <20200217145205.21347-3-mlevitsk@redhat.com> +Patchwork-id: 93893 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 2/2] gluster: the glfs_io_cbk callback function pointer adds pre/post stat args +Bugzilla: 1802216 +RH-Acked-by: Stefan Hajnoczi +RH-Acked-by: Sergio Lopez Pascual +RH-Acked-by: Stefano Garzarella + +From: Niels de Vos + +The glfs_*_async() functions do a callback once finished. This callback +has changed its arguments, pre- and post-stat structures have been +added. This makes it possible to improve caching, which is useful for +Samba and NFS-Ganesha, but not so much for QEMU. Gluster 6 is the first +release that includes these new arguments. + +With an additional detection in ./configure, the new arguments can +conditionally get included in the glfs_io_cbk handler. + +Signed-off-by: Niels de Vos +Signed-off-by: Kevin Wolf +(cherry picked from commit 0e3b891fefacc0e49f3c8ffa3a753b69eb7214d2) +Signed-off-by: Maxim Levitsky +Signed-off-by: Jon Maloy +--- + block/gluster.c | 6 +++++- + configure | 24 ++++++++++++++++++++++++ + 2 files changed, 29 insertions(+), 1 deletion(-) + +diff --git a/block/gluster.c b/block/gluster.c +index e3ffa6136e..a6ac2b1dc1 100644 +--- a/block/gluster.c ++++ b/block/gluster.c +@@ -729,7 +729,11 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, + /* + * AIO callback routine called from GlusterFS thread. + */ +-static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) ++static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, ++#ifdef CONFIG_GLUSTERFS_IOCB_HAS_STAT ++ struct glfs_stat *pre, struct glfs_stat *post, ++#endif ++ void *arg) + { + GlusterAIOCB *acb = (GlusterAIOCB *)arg; + +diff --git a/configure b/configure +index f78663367e..da50c091df 100755 +--- a/configure ++++ b/configure +@@ -430,6 +430,7 @@ glusterfs_discard="no" + glusterfs_fallocate="no" + glusterfs_zerofill="no" + glusterfs_ftruncate_has_stat="no" ++glusterfs_iocb_has_stat="no" + gtk="" + gtkabi="" + gtk_gl="no" +@@ -3885,6 +3886,25 @@ EOF + if compile_prog "$glusterfs_cflags" "$glusterfs_libs" ; then + glusterfs_ftruncate_has_stat="yes" + fi ++ cat > $TMPC << EOF ++#include ++ ++/* new glfs_io_cbk() passes two additional glfs_stat structs */ ++static void ++glusterfs_iocb(glfs_fd_t *fd, ssize_t ret, struct glfs_stat *prestat, struct glfs_stat *poststat, void *data) ++{} ++ ++int ++main(void) ++{ ++ glfs_io_cbk iocb = &glusterfs_iocb; ++ iocb(NULL, 0 , NULL, NULL, NULL); ++ return 0; ++} ++EOF ++ if compile_prog "$glusterfs_cflags" "$glusterfs_libs" ; then ++ glusterfs_iocb_has_stat="yes" ++ fi + else + if test "$glusterfs" = "yes" ; then + feature_not_found "GlusterFS backend support" \ +@@ -6547,6 +6567,10 @@ if test "$glusterfs_ftruncate_has_stat" = "yes" ; then + echo "CONFIG_GLUSTERFS_FTRUNCATE_HAS_STAT=y" >> $config_host_mak + fi + ++if test "$glusterfs_iocb_has_stat" = "yes" ; then ++ echo "CONFIG_GLUSTERFS_IOCB_HAS_STAT=y" >> $config_host_mak ++fi ++ + if test "$libssh2" = "yes" ; then + echo "CONFIG_LIBSSH2=m" >> $config_host_mak + echo "LIBSSH2_CFLAGS=$libssh2_cflags" >> $config_host_mak +-- +2.18.2 + diff --git a/SOURCES/kvm-seccomp-set-the-seccomp-filter-to-all-threads.patch b/SOURCES/kvm-seccomp-set-the-seccomp-filter-to-all-threads.patch new file mode 100644 
index 0000000..10796ca --- /dev/null +++ b/SOURCES/kvm-seccomp-set-the-seccomp-filter-to-all-threads.patch @@ -0,0 +1,77 @@ +From 72b8c0856ace652a57145ea58b0bec9d67d23851 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Tue, 7 Apr 2020 07:03:37 +0200 +Subject: [PATCH] seccomp: set the seccomp filter to all threads +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eduardo Otubo +Message-id: <20200305125537.4031-1-otubo@redhat.com> +Patchwork-id: 94159 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH] seccomp: set the seccomp filter to all threads +Bugzilla: 1618504 1622976 +RH-Acked-by: Marc-André Lureau +RH-Acked-by: Stefan Hajnoczi +RH-Acked-by: Mohammed Gamal + +BZ: 1618504 +BRANCH: rhv78/master +BREW: 27052907 + +commit 70dfabeaa79ba4d7a3b699abe1a047c8012db114 +Author: Marc-André Lureau +Date: Wed Aug 22 19:02:50 2018 +0200 + + When using "-seccomp on", the seccomp policy is only applied to the + main thread, the vcpu worker thread and other worker threads created + after seccomp policy is applied; the seccomp policy is not applied to + e.g. the RCU thread because it is created before the seccomp policy is + applied and SECCOMP_FILTER_FLAG_TSYNC isn't used. + + This can be verified with + for task in /proc/`pidof qemu`/task/*; do cat $task/status | grep Secc ; done + Seccomp: 2 + Seccomp: 0 + Seccomp: 0 + Seccomp: 2 + Seccomp: 2 + Seccomp: 2 + + Starting with libseccomp 2.2.0 and kernel >= 3.17, we can use + seccomp_attr_set(ctx, > SCMP_FLTATR_CTL_TSYNC, 1) to update the policy + on all threads. + + libseccomp requirement was bumped to 2.2.0 in previous patch. + libseccomp should fail to set the filter if it can't honour + SCMP_FLTATR_CTL_TSYNC (untested), and thus -sandbox will now fail on + kernel < 3.17. + + Signed-off-by: Marc-André Lureau + Acked-by: Eduardo Otubo + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + qemu-seccomp.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/qemu-seccomp.c b/qemu-seccomp.c +index b770a77..a5455af 100644 +--- a/qemu-seccomp.c ++++ b/qemu-seccomp.c +@@ -108,6 +108,11 @@ int seccomp_start(uint32_t seccomp_opts) + goto seccomp_return; + } + ++ rc = seccomp_attr_set(ctx, SCMP_FLTATR_CTL_TSYNC, 1); ++ if (rc != 0) { ++ goto seccomp_return; ++ } ++ + for (i = 0; i < ARRAY_SIZE(blacklist); i++) { + if (!(seccomp_opts & blacklist[i].set)) { + continue; +-- +1.8.3.1 + diff --git a/SOURCES/kvm-target-i386-add-a-ucode-rev-property.patch b/SOURCES/kvm-target-i386-add-a-ucode-rev-property.patch new file mode 100644 index 0000000..6a4cbd1 --- /dev/null +++ b/SOURCES/kvm-target-i386-add-a-ucode-rev-property.patch @@ -0,0 +1,129 @@ +From ce999ee40575a2d6ebd862697af96169c8bfcb5f Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:19 -0500 +Subject: [PATCH 04/12] target/i386: add a ucode-rev property + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-3-pbonzini@redhat.com> +Patchwork-id: 93901 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 2/6] target/i386: add a ucode-rev property +Bugzilla: 1791653 +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Eduardo Habkost +RH-Acked-by: Dr. David Alan Gilbert + +Add the property and plumb it in TCG and HVF (the latter of which +tried to support returning a constant value but used the wrong MSR). 
+ +Signed-off-by: Paolo Bonzini +Message-Id: <1579544504-3616-3-git-send-email-pbonzini@redhat.com> +Signed-off-by: Paolo Bonzini +(cherry picked from commit 4e45aff398cd1542c2a384a2a3b8600f23337d86) + +[RHEL7: replace env_archcpu with x86_env_get_cpu] + +Signed-off-by: Jon Maloy +--- + target/i386/cpu.c | 10 ++++++++++ + target/i386/cpu.h | 4 ++++ + target/i386/hvf/x86_emu.c | 4 +--- + target/i386/misc_helper.c | 4 ++++ + 4 files changed, 19 insertions(+), 3 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 4d87879328..ad905d6f8c 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -4943,6 +4943,15 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) + goto out; + } + ++ if (cpu->ucode_rev == 0) { ++ /* The default is the same as KVM's. */ ++ if (IS_AMD_CPU(env)) { ++ cpu->ucode_rev = 0x01000065; ++ } else { ++ cpu->ucode_rev = 0x100000000ULL; ++ } ++ } ++ + if (cpu->apic_id == UNASSIGNED_APIC_ID) { + error_setg(errp, "apic-id property was not initialized properly"); + return; +@@ -5553,6 +5562,7 @@ static Property x86_cpu_properties[] = { + DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), + DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), + DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), ++ DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), + DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), + DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), + DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 65c4fda102..4f3fc8ed59 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -353,6 +353,8 @@ typedef enum X86Seg { + #define MSR_IA32_SPEC_CTRL 0x48 + #define MSR_VIRT_SSBD 0xc001011f + #define MSR_IA32_PRED_CMD 0x49 ++#define MSR_IA32_UCODE_REV 0x8b ++ + #define MSR_IA32_ARCH_CAPABILITIES 0x10a + #define ARCH_CAP_TSX_CTRL_MSR (1<<7) + +@@ -1370,6 +1372,8 @@ struct X86CPU { + + CPUX86State env; + ++ uint64_t ucode_rev; ++ + bool hyperv_vapic; + bool hyperv_relaxed_timing; + int hyperv_spinlock_attempts; +diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c +index 3ea18edc68..e0986a9b5a 100644 +--- a/target/i386/hvf/x86_emu.c ++++ b/target/i386/hvf/x86_emu.c +@@ -658,8 +658,6 @@ static void exec_lods(struct CPUX86State *env, struct x86_decode *decode) + RIP(env) += decode->len; + } + +-#define MSR_IA32_UCODE_REV 0x00000017 +- + void simulate_rdmsr(struct CPUState *cpu) + { + X86CPU *x86_cpu = X86_CPU(cpu); +@@ -675,7 +673,7 @@ void simulate_rdmsr(struct CPUState *cpu) + val = cpu_get_apic_base(X86_CPU(cpu)->apic_state); + break; + case MSR_IA32_UCODE_REV: +- val = (0x100000000ULL << 32) | 0x100000000ULL; ++ val = x86_cpu->ucode_rev; + break; + case MSR_EFER: + val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER); +diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c +index 628f64aad5..f93e61f05d 100644 +--- a/target/i386/misc_helper.c ++++ b/target/i386/misc_helper.c +@@ -229,6 +229,7 @@ void helper_rdmsr(CPUX86State *env) + #else + void helper_wrmsr(CPUX86State *env) + { ++ X86CPU *x86_cpu = x86_env_get_cpu(env); + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); +@@ -371,6 +372,9 @@ void helper_wrmsr(CPUX86State *env) + env->msr_bndcfgs = val; + cpu_sync_bndcs_hflags(env); + break; ++ case MSR_IA32_UCODE_REV: ++ val = x86_cpu->ucode_rev; ++ break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && 
(uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + +-- +2.18.2 + diff --git a/SOURCES/kvm-target-i386-check-for-availability-of-MSR_IA32_UCODE.patch b/SOURCES/kvm-target-i386-check-for-availability-of-MSR_IA32_UCODE.patch new file mode 100644 index 0000000..5fbf1b3 --- /dev/null +++ b/SOURCES/kvm-target-i386-check-for-availability-of-MSR_IA32_UCODE.patch @@ -0,0 +1,72 @@ +From 2b1ca7468155b2bda5d81be114335e264767cc7a Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:22 -0500 +Subject: [PATCH 07/12] target/i386: check for availability of + MSR_IA32_UCODE_REV as an emulated MSR +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-6-pbonzini@redhat.com> +Patchwork-id: 93908 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 5/6] target/i386: check for availability of MSR_IA32_UCODE_REV as an emulated MSR +Bugzilla: 1791653 +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Philippe Mathieu-Daudé +RH-Acked-by: Dr. David Alan Gilbert + +Even though MSR_IA32_UCODE_REV has been available long before Linux 5.6, +which added it to the emulated MSR list, a bug caused the microcode +version to revert to 0x100000000 on INIT. As a result, processors other +than the bootstrap processor would not see the host microcode revision; +some Windows version complain loudly about this and crash with a +fairly explicit MICROCODE REVISION MISMATCH error. + +[If running 5.6 prereleases, the kernel fix "KVM: x86: do not reset + microcode version on INIT or RESET" should also be applied.] + +Reported-by: Alex Williamson +Message-id: <20200211175516.10716-1-pbonzini@redhat.com> +Signed-off-by: Paolo Bonzini +(cherry picked from commit 6702514814c7e7b4cbf179624539b5f38c72740b) +Signed-off-by: Jon Maloy +--- + target/i386/kvm.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/target/i386/kvm.c b/target/i386/kvm.c +index 4d43fba716..22da78aee9 100644 +--- a/target/i386/kvm.c ++++ b/target/i386/kvm.c +@@ -98,6 +98,7 @@ static bool has_msr_tsx_ctrl; + static bool has_msr_virt_ssbd; + static bool has_msr_smi_count; + static bool has_msr_arch_capabs; ++static bool has_msr_ucode_rev; + + static uint32_t has_architectural_pmu_version; + static uint32_t num_architectural_pmu_gp_counters; +@@ -1354,6 +1355,9 @@ static int kvm_get_supported_msrs(KVMState *s) + case MSR_IA32_ARCH_CAPABILITIES: + has_msr_arch_capabs = true; + break; ++ case MSR_IA32_UCODE_REV: ++ has_msr_ucode_rev = true; ++ break; + } + } + } +@@ -1828,8 +1832,7 @@ static void kvm_init_msrs(X86CPU *cpu) + env->features[FEAT_ARCH_CAPABILITIES]); + } + +- if (kvm_arch_get_supported_msr_feature(kvm_state, +- MSR_IA32_UCODE_REV)) { ++ if (has_msr_ucode_rev) { + kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); + } + +-- +2.18.2 + diff --git a/SOURCES/kvm-target-i386-enable-monitor-and-ucode-revision-with-c.patch b/SOURCES/kvm-target-i386-enable-monitor-and-ucode-revision-with-c.patch new file mode 100644 index 0000000..12b6a18 --- /dev/null +++ b/SOURCES/kvm-target-i386-enable-monitor-and-ucode-revision-with-c.patch @@ -0,0 +1,49 @@ +From 6bd31dbbc477571124ea1ce9e64217c898f05f22 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:23 -0500 +Subject: [PATCH 08/12] target/i386: enable monitor and ucode revision with + -cpu max +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-7-pbonzini@redhat.com> 
+Patchwork-id: 93905 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 6/6] target/i386: enable monitor and ucode revision with -cpu max +Bugzilla: 1791653 +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Philippe Mathieu-Daudé +RH-Acked-by: Dr. David Alan Gilbert + +These two features were incorrectly tied to host_cpuid_required rather than +cpu->max_features. As a result, -cpu max was not enabling either MONITOR +features or ucode revision. + +Signed-off-by: Paolo Bonzini +(cherry picked from commit be02cda3afde60d219786e23c3f8edb53aec8e17) + +[RHEL7: only affects microcode revision; plus, upstream uses g_autofree] + +Signed-off-by: Jon Maloy +--- + target/i386/cpu.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 3579bd53c5..88af0f4348 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -4943,7 +4943,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) + g_free(name); + goto out; + } ++ } + ++ if (cpu->max_features && accel_uses_host_cpuid()) { + if (kvm_enabled() && cpu->ucode_rev == 0) { + cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, + MSR_IA32_UCODE_REV); +-- +2.18.2 + diff --git a/SOURCES/kvm-target-i386-fix-TCG-UCODE_REV-access.patch b/SOURCES/kvm-target-i386-fix-TCG-UCODE_REV-access.patch new file mode 100644 index 0000000..58e7536 --- /dev/null +++ b/SOURCES/kvm-target-i386-fix-TCG-UCODE_REV-access.patch @@ -0,0 +1,76 @@ +From 836c2251308bb9a3a8355e09c2d89c53526aa898 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:21 -0500 +Subject: [PATCH 06/12] target/i386: fix TCG UCODE_REV access +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-5-pbonzini@redhat.com> +Patchwork-id: 93906 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 4/6] target/i386: fix TCG UCODE_REV access +Bugzilla: 1791653 +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Philippe Mathieu-Daudé +RH-Acked-by: Dr. David Alan Gilbert + +This was a very interesting semantic conflict that caused git to move +the MSR_IA32_UCODE_REV read to helper_wrmsr. Not a big deal, but +still should be fixed... 
+ +Fixes: 4e45aff398 ("target/i386: add a ucode-rev property", 2020-01-24) +Message-id: <20200206171022.9289-1-pbonzini@redhat.com> +Signed-off-by: Paolo Bonzini +(cherry picked from commit 9028c75c9d08be303ccc425bfe3d3b23d8f4cac7) + +[RHEL7: replace env_archcpu with x86_env_get_cpu] + +Signed-off-by: Jon Maloy +--- + target/i386/misc_helper.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c +index f93e61f05d..42daf36764 100644 +--- a/target/i386/misc_helper.c ++++ b/target/i386/misc_helper.c +@@ -229,7 +229,6 @@ void helper_rdmsr(CPUX86State *env) + #else + void helper_wrmsr(CPUX86State *env) + { +- X86CPU *x86_cpu = x86_env_get_cpu(env); + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); +@@ -372,9 +371,6 @@ void helper_wrmsr(CPUX86State *env) + env->msr_bndcfgs = val; + cpu_sync_bndcs_hflags(env); + break; +- case MSR_IA32_UCODE_REV: +- val = x86_cpu->ucode_rev; +- break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + +@@ -393,6 +389,7 @@ void helper_wrmsr(CPUX86State *env) + + void helper_rdmsr(CPUX86State *env) + { ++ X86CPU *x86_cpu = x86_env_get_cpu(env); + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC()); +@@ -523,6 +520,9 @@ void helper_rdmsr(CPUX86State *env) + case MSR_IA32_BNDCFGS: + val = env->msr_bndcfgs; + break; ++ case MSR_IA32_UCODE_REV: ++ val = x86_cpu->ucode_rev; ++ break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + +-- +2.18.2 + diff --git a/SOURCES/kvm-target-i386-kvm-initialize-feature-MSRs-very-early.patch b/SOURCES/kvm-target-i386-kvm-initialize-feature-MSRs-very-early.patch new file mode 100644 index 0000000..015d172 --- /dev/null +++ b/SOURCES/kvm-target-i386-kvm-initialize-feature-MSRs-very-early.patch @@ -0,0 +1,148 @@ +From d72e41f9a16360eb23e9d943fa7e33291c5fcd87 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:18 -0500 +Subject: [PATCH 03/12] target/i386: kvm: initialize feature MSRs very early +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-2-pbonzini@redhat.com> +Patchwork-id: 93896 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 1/6] target/i386: kvm: initialize feature MSRs very early +Bugzilla: 1791653 +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Philippe Mathieu-Daudé +RH-Acked-by: Dr. David Alan Gilbert + +Some read-only MSRs affect the behavior of ioctls such as +KVM_SET_NESTED_STATE. We can initialize them once and for all +right after the CPU is realized, since they will never be modified +by the guest. 
+ +Reported-by: Qingua Cheng +Cc: qemu-stable@nongnu.org +Signed-off-by: Paolo Bonzini +Message-Id: <1579544504-3616-2-git-send-email-pbonzini@redhat.com> +Signed-off-by: Paolo Bonzini +(cherry picked from commit 420ae1fc51c99abfd03b1c590f55617edd2a2bed) + +[RHEL7: no MSR_IA32_CORE_CAPABILITY] + +Signed-off-by: Jon Maloy +--- + target/i386/kvm.c | 55 +++++++++++++++++++++++++++--------------- + target/i386/kvm_i386.h | 1 + + 2 files changed, 36 insertions(+), 20 deletions(-) + +diff --git a/target/i386/kvm.c b/target/i386/kvm.c +index a6e5a87cf5..d8a4dbfde3 100644 +--- a/target/i386/kvm.c ++++ b/target/i386/kvm.c +@@ -65,6 +65,8 @@ + * 255 kvm_msr_entry structs */ + #define MSR_BUF_SIZE 4096 + ++static void kvm_init_msrs(X86CPU *cpu); ++ + const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_INFO(SET_TSS_ADDR), + KVM_CAP_INFO(EXT_CPUID), +@@ -1175,6 +1177,8 @@ int kvm_arch_init_vcpu(CPUState *cs) + has_msr_tsc_aux = false; + } + ++ kvm_init_msrs(cpu); ++ + return 0; + + fail: +@@ -1797,11 +1801,40 @@ static int kvm_put_msr_feature_control(X86CPU *cpu) + return 0; + } + ++static int kvm_buf_set_msrs(X86CPU *cpu) ++{ ++ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ if (ret < cpu->kvm_msr_buf->nmsrs) { ++ struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; ++ error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, ++ (uint32_t)e->index, (uint64_t)e->data); ++ } ++ ++ assert(ret == cpu->kvm_msr_buf->nmsrs); ++ return 0; ++} ++ ++static void kvm_init_msrs(X86CPU *cpu) ++{ ++ CPUX86State *env = &cpu->env; ++ ++ kvm_msr_buf_reset(cpu); ++ if (has_msr_arch_capabs) { ++ kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, ++ env->features[FEAT_ARCH_CAPABILITIES]); ++ } ++ ++ assert(kvm_buf_set_msrs(cpu) == 0); ++} ++ + static int kvm_put_msrs(X86CPU *cpu, int level) + { + CPUX86State *env = &cpu->env; + int i; +- int ret; + + kvm_msr_buf_reset(cpu); + +@@ -1856,12 +1889,6 @@ static int kvm_put_msrs(X86CPU *cpu, int level) + } + #endif + +- /* If host supports feature MSR, write down. */ +- if (has_msr_arch_capabs) { +- kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, +- env->features[FEAT_ARCH_CAPABILITIES]); +- } +- + /* + * The following MSRs have side effects on the guest or are too heavy + * for normal writeback. Limit them to reset or full state updates. 
+@@ -2040,19 +2067,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level) + } + } + +- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); +- if (ret < 0) { +- return ret; +- } +- +- if (ret < cpu->kvm_msr_buf->nmsrs) { +- struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; +- error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, +- (uint32_t)e->index, (uint64_t)e->data); +- } +- +- assert(ret == cpu->kvm_msr_buf->nmsrs); +- return 0; ++ return kvm_buf_set_msrs(cpu); + } + + +diff --git a/target/i386/kvm_i386.h b/target/i386/kvm_i386.h +index 1de9876cd9..856044750e 100644 +--- a/target/i386/kvm_i386.h ++++ b/target/i386/kvm_i386.h +@@ -69,4 +69,5 @@ void kvm_put_apicbase(X86CPU *cpu, uint64_t value); + + bool kvm_enable_x2apic(void); + bool kvm_has_x2apic_api(void); ++ + #endif +-- +2.18.2 + diff --git a/SOURCES/kvm-target-i386-kvm-initialize-microcode-revision-from-K.patch b/SOURCES/kvm-target-i386-kvm-initialize-microcode-revision-from-K.patch new file mode 100644 index 0000000..e26a2de --- /dev/null +++ b/SOURCES/kvm-target-i386-kvm-initialize-microcode-revision-from-K.patch @@ -0,0 +1,107 @@ +From 3a740f27c76ccd7fa2c0ece979a480bece592674 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 17 Feb 2020 16:23:20 -0500 +Subject: [PATCH 05/12] target/i386: kvm: initialize microcode revision from + KVM +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Paolo Bonzini +Message-id: <20200217162323.2572-4-pbonzini@redhat.com> +Patchwork-id: 93900 +O-Subject: [RHEL7.9 qemu-kvm-rhev PATCH 3/6] target/i386: kvm: initialize microcode revision from KVM +Bugzilla: 1791653 +RH-Acked-by: Philippe Mathieu-Daudé +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Dr. David Alan Gilbert + +KVM can return the host microcode revision as a feature MSR. +Use it as the default value for -cpu host. 
+ +Signed-off-by: Paolo Bonzini +Message-Id: <1579544504-3616-4-git-send-email-pbonzini@redhat.com> +Signed-off-by: Paolo Bonzini +(cherry picked from commit 32c87d70ff55b96741f08c35108935cac6f40fe4) + +[RHEL7: change kvm_arch_get_supported_msr_feature to return 64-bit + value, originally done for VMX features; split "if" statement in + two parts, originally done for upstream commit 2266d44311 + ("i386/cpu: make -cpu host support monitor/mwait", 2018-06-29)] + +Signed-off-by: Jon Maloy +--- + include/sysemu/kvm.h | 2 +- + target/i386/cpu.c | 17 ++++++++++++----- + target/i386/kvm.c | 7 ++++++- + 3 files changed, 19 insertions(+), 7 deletions(-) + +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 3d8f294633..cd9a0206ce 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -464,7 +464,7 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension); + + uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function, + uint32_t index, int reg); +-uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index); ++uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index); + + + void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len); +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index ad905d6f8c..3579bd53c5 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -4936,11 +4936,18 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) + Error *local_err = NULL; + static bool ht_warned; + +- if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { +- char *name = x86_cpu_class_get_model_name(xcc); +- error_setg(&local_err, "CPU model '%s' requires KVM", name); +- g_free(name); +- goto out; ++ if (xcc->host_cpuid_required) { ++ if (!accel_uses_host_cpuid()) { ++ char *name = x86_cpu_class_get_model_name(xcc); ++ error_setg(&local_err, "CPU model '%s' requires KVM", name); ++ g_free(name); ++ goto out; ++ } ++ ++ if (kvm_enabled() && cpu->ucode_rev == 0) { ++ cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, ++ MSR_IA32_UCODE_REV); ++ } + } + + if (cpu->ucode_rev == 0) { +diff --git a/target/i386/kvm.c b/target/i386/kvm.c +index d8a4dbfde3..4d43fba716 100644 +--- a/target/i386/kvm.c ++++ b/target/i386/kvm.c +@@ -419,7 +419,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, + return ret; + } + +-uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index) ++uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index) + { + struct { + struct kvm_msrs info; +@@ -1828,6 +1828,11 @@ static void kvm_init_msrs(X86CPU *cpu) + env->features[FEAT_ARCH_CAPABILITIES]); + } + ++ if (kvm_arch_get_supported_msr_feature(kvm_state, ++ MSR_IA32_UCODE_REV)) { ++ kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); ++ } ++ + assert(kvm_buf_set_msrs(cpu) == 0); + } + +-- +2.18.2 + diff --git a/SOURCES/kvm-virtio-add-ability-to-delete-vq-through-a-pointer.patch b/SOURCES/kvm-virtio-add-ability-to-delete-vq-through-a-pointer.patch new file mode 100644 index 0000000..e31d135 --- /dev/null +++ b/SOURCES/kvm-virtio-add-ability-to-delete-vq-through-a-pointer.patch @@ -0,0 +1,74 @@ +From 928998018b7c1665c5f033e1609ae32e27a3d2c2 Mon Sep 17 00:00:00 2001 +From: Julia Suvorova +Date: Wed, 4 Mar 2020 20:07:52 -0500 +Subject: [PATCH 10/12] virtio: add ability to delete vq through a pointer + +RH-Author: Julia Suvorova +Message-id: <20200304200754.32708-3-jusual@redhat.com> +Patchwork-id: 94153 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 2/4] virtio: add ability to delete 
vq through a pointer +Bugzilla: 1721403 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Michael S. Tsirkin + +From: "Michael S. Tsirkin" + +Devices tend to maintain vq pointers, allow deleting them trough a vq pointer. + +Signed-off-by: Michael S. Tsirkin +Reviewed-by: David Hildenbrand +Reviewed-by: David Hildenbrand +(cherry picked from commit 722f8c51d8af223751dfb1d02de40043e8ba067e) +Signed-off-by: Jon Maloy +--- + hw/virtio/virtio.c | 13 +++++++++---- + include/hw/virtio/virtio.h | 2 ++ + 2 files changed, 11 insertions(+), 4 deletions(-) + +diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c +index d228b9297d..29d8502500 100644 +--- a/hw/virtio/virtio.c ++++ b/hw/virtio/virtio.c +@@ -1602,16 +1602,21 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, + return &vdev->vq[i]; + } + ++void virtio_delete_queue(VirtQueue *vq) ++{ ++ vq->vring.num = 0; ++ vq->vring.num_default = 0; ++ vq->handle_output = NULL; ++ vq->handle_aio_output = NULL; ++} ++ + void virtio_del_queue(VirtIODevice *vdev, int n) + { + if (n < 0 || n >= VIRTIO_QUEUE_MAX) { + abort(); + } + +- vdev->vq[n].vring.num = 0; +- vdev->vq[n].vring.num_default = 0; +- vdev->vq[n].handle_output = NULL; +- vdev->vq[n].handle_aio_output = NULL; ++ virtio_delete_queue(&vdev->vq[n]); + } + + static void virtio_set_isr(VirtIODevice *vdev, int value) +diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h +index 302975889b..8b348be70c 100644 +--- a/include/hw/virtio/virtio.h ++++ b/include/hw/virtio/virtio.h +@@ -164,6 +164,8 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, + + void virtio_del_queue(VirtIODevice *vdev, int n); + ++void virtio_delete_queue(VirtQueue *vq); ++ + void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len); + void virtqueue_flush(VirtQueue *vq, unsigned int count); +-- +2.18.2 + diff --git a/SOURCES/kvm-virtio-net-delete-also-control-queue-when-TX-RX-dele.patch b/SOURCES/kvm-virtio-net-delete-also-control-queue-when-TX-RX-dele.patch new file mode 100644 index 0000000..3bade5e --- /dev/null +++ b/SOURCES/kvm-virtio-net-delete-also-control-queue-when-TX-RX-dele.patch @@ -0,0 +1,53 @@ +From a1fd8f3e5f2faeccd08a617c0208dd786cf4a6d7 Mon Sep 17 00:00:00 2001 +From: Julia Suvorova +Date: Wed, 4 Mar 2020 20:07:54 -0500 +Subject: [PATCH 12/12] virtio-net: delete also control queue when TX/RX + deleted + +RH-Author: Julia Suvorova +Message-id: <20200304200754.32708-5-jusual@redhat.com> +Patchwork-id: 94155 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 4/4] virtio-net: delete also control queue when TX/RX deleted +Bugzilla: 1721403 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Michael S. Tsirkin + +From: Yuri Benditovich + +https://bugzilla.redhat.com/show_bug.cgi?id=1708480 +If the control queue is not deleted together with TX/RX, it +later will be ignored in freeing cache resources and hot +unplug will not be completed. + +Cc: qemu-stable@nongnu.org +Signed-off-by: Yuri Benditovich +Message-Id: <20191226043649.14481-3-yuri.benditovich@daynix.com> +Reviewed-by: Michael S. Tsirkin +Signed-off-by: Michael S. 
Tsirkin +(cherry picked from commit d945d9f1731244ef341f74ede93120fc9de35913) +Signed-off-by: Jon Maloy +--- + hw/net/virtio-net.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c +index 90502fca7c..c4896184ed 100644 +--- a/hw/net/virtio-net.c ++++ b/hw/net/virtio-net.c +@@ -2100,8 +2100,12 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp) + virtio_net_del_queue(n, i); + } + ++ /* delete also control vq */ ++ virtio_del_queue(vdev, max_queues * 2); ++ + timer_del(n->announce_timer); + timer_free(n->announce_timer); ++ + g_free(n->vqs); + qemu_del_nic(n->nic); + virtio_cleanup(vdev); +-- +2.18.2 + diff --git a/SOURCES/kvm-virtio-reset-region-cache-when-on-queue-deletion.patch b/SOURCES/kvm-virtio-reset-region-cache-when-on-queue-deletion.patch new file mode 100644 index 0000000..e1e7418 --- /dev/null +++ b/SOURCES/kvm-virtio-reset-region-cache-when-on-queue-deletion.patch @@ -0,0 +1,46 @@ +From eec59692f7c3a776a3b5d01a367a3f467f403941 Mon Sep 17 00:00:00 2001 +From: Julia Suvorova +Date: Wed, 4 Mar 2020 20:07:53 -0500 +Subject: [PATCH 11/12] virtio: reset region cache when on queue deletion + +RH-Author: Julia Suvorova +Message-id: <20200304200754.32708-4-jusual@redhat.com> +Patchwork-id: 94154 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 3/4] virtio: reset region cache when on queue deletion +Bugzilla: 1721403 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Maxim Levitsky +RH-Acked-by: Michael S. Tsirkin + +From: Yuri Benditovich + +https://bugzilla.redhat.com/show_bug.cgi?id=1708480 +Fix leak of region reference that prevents complete +device deletion on hot unplug. + +Cc: qemu-stable@nongnu.org +Signed-off-by: Yuri Benditovich +Message-Id: <20191226043649.14481-2-yuri.benditovich@daynix.com> +Reviewed-by: Michael S. Tsirkin +Signed-off-by: Michael S. Tsirkin +(cherry picked from commit 421afd2fe8dd4603216cbf36081877c391f5a2a4) +Signed-off-by: Jon Maloy +--- + hw/virtio/virtio.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c +index 29d8502500..9d624a3529 100644 +--- a/hw/virtio/virtio.c ++++ b/hw/virtio/virtio.c +@@ -1608,6 +1608,7 @@ void virtio_delete_queue(VirtQueue *vq) + vq->vring.num_default = 0; + vq->handle_output = NULL; + vq->handle_aio_output = NULL; ++ virtio_virtqueue_reset_region_cache(vq); + } + + void virtio_del_queue(VirtIODevice *vdev, int n) +-- +2.18.2 + diff --git a/SOURCES/kvm-vnc-add-magic-cookie-to-VncState.patch b/SOURCES/kvm-vnc-add-magic-cookie-to-VncState.patch new file mode 100644 index 0000000..2e11d0f --- /dev/null +++ b/SOURCES/kvm-vnc-add-magic-cookie-to-VncState.patch @@ -0,0 +1,156 @@ +From c45e31fa19147b42c2e7c6a3ded711c24701f7a2 Mon Sep 17 00:00:00 2001 +From: jmaloy +Date: Thu, 28 May 2020 12:22:25 +0200 +Subject: [PATCH 1/2] vnc: add magic cookie to VncState + +RH-Author: jmaloy +Message-id: <20200508213316.1251860-2-jmaloy@redhat.com> +Patchwork-id: 96349 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 1/2] vnc: add magic cookie to VncState +Bugzilla: 1810409 +RH-Acked-by: Gerd Hoffmann +RH-Acked-by: John Snow +RH-Acked-by: Daniel P. Berrange + +From: Gerd Hoffmann + +Set magic cookie on initialization. Clear on cleanup. Sprinkle a bunch +of assert()s checking the cookie, to verify the pointer is valid. 
+ +Signed-off-by: Gerd Hoffmann +Message-id: 20180507102254.12107-1-kraxel@redhat.com + +(cherry picked from commit f31f9c1080d8907c95f1501c6abab038eceb5490) +Signed-off-by: Jon Maloy +Signed-off-by: Miroslav Rezanina +--- + ui/vnc-jobs.c | 4 ++++ + ui/vnc.c | 10 +++++++++- + ui/vnc.h | 3 +++ + 3 files changed, 16 insertions(+), 1 deletion(-) + +diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c +index 868ddde..b0b15d4 100644 +--- a/ui/vnc-jobs.c ++++ b/ui/vnc-jobs.c +@@ -82,6 +82,7 @@ VncJob *vnc_job_new(VncState *vs) + { + VncJob *job = g_new0(VncJob, 1); + ++ assert(vs->magic == VNC_MAGIC); + job->vs = vs; + vnc_lock_queue(queue); + QLIST_INIT(&job->rectangles); +@@ -214,6 +215,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) + /* Here job can only be NULL if queue->exit is true */ + job = QTAILQ_FIRST(&queue->jobs); + vnc_unlock_queue(queue); ++ assert(job->vs->magic == VNC_MAGIC); + + if (queue->exit) { + return -1; +@@ -236,6 +238,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) + + /* Make a local copy of vs and switch output buffers */ + vnc_async_encoding_start(job->vs, &vs); ++ vs.magic = VNC_MAGIC; + + /* Start sending rectangles */ + n_rectangles = 0; +@@ -289,6 +292,7 @@ disconnected: + vnc_unlock_queue(queue); + qemu_cond_broadcast(&queue->cond); + g_free(job); ++ vs.magic = 0; + return 0; + } + +diff --git a/ui/vnc.c b/ui/vnc.c +index 86c6762..fbd0da1 100644 +--- a/ui/vnc.c ++++ b/ui/vnc.c +@@ -1138,6 +1138,7 @@ static void audio_capture_notify(void *opaque, audcnotification_e cmd) + { + VncState *vs = opaque; + ++ assert(vs->magic == VNC_MAGIC); + switch (cmd) { + case AUD_CNOTIFY_DISABLE: + vnc_lock_output(vs); +@@ -1167,6 +1168,7 @@ static void audio_capture(void *opaque, void *buf, int size) + { + VncState *vs = opaque; + ++ assert(vs->magic == VNC_MAGIC); + vnc_lock_output(vs); + if (vs->output.offset < vs->throttle_output_offset) { + vnc_write_u8(vs, VNC_MSG_SERVER_QEMU); +@@ -1275,6 +1277,7 @@ void vnc_disconnect_finish(VncState *vs) + vs->ioc = NULL; + object_unref(OBJECT(vs->sioc)); + vs->sioc = NULL; ++ vs->magic = 0; + g_free(vs); + } + +@@ -1414,7 +1417,7 @@ static void vnc_client_write_locked(VncState *vs) + + static void vnc_client_write(VncState *vs) + { +- ++ assert(vs->magic == VNC_MAGIC); + vnc_lock_output(vs); + if (vs->output.offset) { + vnc_client_write_locked(vs); +@@ -1487,6 +1490,7 @@ static void vnc_jobs_bh(void *opaque) + { + VncState *vs = opaque; + ++ assert(vs->magic == VNC_MAGIC); + vnc_jobs_consume_buffer(vs); + } + +@@ -1537,6 +1541,8 @@ gboolean vnc_client_io(QIOChannel *ioc G_GNUC_UNUSED, + GIOCondition condition, void *opaque) + { + VncState *vs = opaque; ++ ++ assert(vs->magic == VNC_MAGIC); + if (condition & G_IO_IN) { + if (vnc_client_read(vs) < 0) { + goto end; +@@ -1567,6 +1573,7 @@ end: + + void vnc_write(VncState *vs, const void *data, size_t len) + { ++ assert(vs->magic == VNC_MAGIC); + if (vs->disconnecting) { + return; + } +@@ -3063,6 +3070,7 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, + int i; + + trace_vnc_client_connect(vs, sioc); ++ vs->magic = VNC_MAGIC; + vs->sioc = sioc; + object_ref(OBJECT(vs->sioc)); + vs->ioc = QIO_CHANNEL(sioc); +diff --git a/ui/vnc.h b/ui/vnc.h +index 7b29def..7626329 100644 +--- a/ui/vnc.h ++++ b/ui/vnc.h +@@ -255,8 +255,11 @@ typedef enum { + VNC_STATE_UPDATE_FORCE, + } VncStateUpdate; + ++#define VNC_MAGIC ((uint64_t)0x05b3f069b3d204bb) ++ + struct VncState + { ++ uint64_t magic; + QIOChannelSocket *sioc; /* The underlying socket */ + QIOChannel *ioc; /* The 
channel currently used for I/O */ + guint ioc_tag; +-- +1.8.3.1 + diff --git a/SOURCES/kvm-vnc-fix-memory-leak-when-vnc-disconnect.patch b/SOURCES/kvm-vnc-fix-memory-leak-when-vnc-disconnect.patch new file mode 100644 index 0000000..56aed77 --- /dev/null +++ b/SOURCES/kvm-vnc-fix-memory-leak-when-vnc-disconnect.patch @@ -0,0 +1,1029 @@ +From 218e7ff50aceac54654f950fbf67ae5f9baf0a5a Mon Sep 17 00:00:00 2001 +From: jmaloy +Date: Thu, 28 May 2020 12:22:30 +0200 +Subject: [PATCH 2/2] vnc: fix memory leak when vnc disconnect + +RH-Author: jmaloy +Message-id: <20200508213316.1251860-3-jmaloy@redhat.com> +Patchwork-id: 96350 +O-Subject: [RHEL-7.9 qemu-kvm-rhev PATCH 2/2] vnc: fix memory leak when vnc disconnect +Bugzilla: 1810409 +RH-Acked-by: Gerd Hoffmann +RH-Acked-by: John Snow +RH-Acked-by: Daniel P. Berrange + +From: Li Qiang + +Currently when qemu receives a vnc connect, it creates a 'VncState' to +represent this connection. In 'vnc_worker_thread_loop' it creates a +local 'VncState'. The connection 'VcnState' and local 'VncState' exchange +data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'. +In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library +opaque data. The 'VncState' used in 'zrle_compress_data' is the local +'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz +library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection +'VncState'. In currently implementation there will be a memory leak when the +vnc disconnect. Following is the asan output backtrack: + +Direct leak of 29760 byte(s) in 5 object(s) allocated from: + 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3) + 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb) + 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7) + 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87 + 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344 + 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919 + 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271 + 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340 + 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502 + 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb) + 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb) + +This is because the opaque allocated in 'deflateInit2' is not freed in +'deflateEnd'. The reason is that the 'deflateEnd' calls 'deflateStateCheck' +and in the latter will check whether 's->strm != strm'(libz's data structure). +This check will be true so in 'deflateEnd' it just return 'Z_STREAM_ERROR' and +not free the data allocated in 'deflateInit2'. + +The reason this happens is that the 'VncState' contains the whole 'VncZrle', +so when calling 'deflateInit2', the 's->strm' will be the local address. +So 's->strm != strm' will be true. + +To fix this issue, we need to make 'zrle' of 'VncState' to be a pointer. +Then the connection 'VncState' and local 'VncState' exchange mechanism will +work as expection. The 'tight' of 'VncState' has the same issue, let's also turn +it to a pointer. 
+ +Reported-by: Ying Fang +Signed-off-by: Li Qiang +Message-id: 20190831153922.121308-1-liq3ea@163.com +Signed-off-by: Gerd Hoffmann + +(cherry picked from commit 6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0) +Signed-off-by: Jon Maloy +Signed-off-by: Miroslav Rezanina +--- + ui/vnc-enc-tight.c | 219 +++++++++++++++++++++++---------------------- + ui/vnc-enc-zlib.c | 11 +-- + ui/vnc-enc-zrle-template.c | 2 +- + ui/vnc-enc-zrle.c | 68 +++++++------- + ui/vnc.c | 28 +++--- + ui/vnc.h | 4 +- + 6 files changed, 170 insertions(+), 162 deletions(-) + +diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c +index f38aceb..9ce2b42 100644 +--- a/ui/vnc-enc-tight.c ++++ b/ui/vnc-enc-tight.c +@@ -117,7 +117,7 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h, + + static bool tight_can_send_png_rect(VncState *vs, int w, int h) + { +- if (vs->tight.type != VNC_ENCODING_TIGHT_PNG) { ++ if (vs->tight->type != VNC_ENCODING_TIGHT_PNG) { + return false; + } + +@@ -145,7 +145,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h) + int pixels = 0; + int pix, left[3]; + unsigned int errors; +- unsigned char *buf = vs->tight.tight.buffer; ++ unsigned char *buf = vs->tight->tight.buffer; + + /* + * If client is big-endian, color samples begin from the second +@@ -216,7 +216,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h) + int pixels = 0; \ + int sample, sum, left[3]; \ + unsigned int errors; \ +- unsigned char *buf = vs->tight.tight.buffer; \ ++ unsigned char *buf = vs->tight->tight.buffer; \ + \ + endian = 0; /* FIXME */ \ + \ +@@ -297,8 +297,8 @@ static int + tight_detect_smooth_image(VncState *vs, int w, int h) + { + unsigned int errors; +- int compression = vs->tight.compression; +- int quality = vs->tight.quality; ++ int compression = vs->tight->compression; ++ int quality = vs->tight->quality; + + if (!vs->vd->lossy) { + return 0; +@@ -310,7 +310,7 @@ tight_detect_smooth_image(VncState *vs, int w, int h) + return 0; + } + +- if (vs->tight.quality != (uint8_t)-1) { ++ if (vs->tight->quality != (uint8_t)-1) { + if (w * h < VNC_TIGHT_JPEG_MIN_RECT_SIZE) { + return 0; + } +@@ -321,9 +321,9 @@ tight_detect_smooth_image(VncState *vs, int w, int h) + } + + if (vs->client_pf.bytes_per_pixel == 4) { +- if (vs->tight.pixel24) { ++ if (vs->tight->pixel24) { + errors = tight_detect_smooth_image24(vs, w, h); +- if (vs->tight.quality != (uint8_t)-1) { ++ if (vs->tight->quality != (uint8_t)-1) { + return (errors < tight_conf[quality].jpeg_threshold24); + } + return (errors < tight_conf[compression].gradient_threshold24); +@@ -353,7 +353,7 @@ tight_detect_smooth_image(VncState *vs, int w, int h) + uint##bpp##_t c0, c1, ci; \ + int i, n0, n1; \ + \ +- data = (uint##bpp##_t *)vs->tight.tight.buffer; \ ++ data = (uint##bpp##_t *)vs->tight->tight.buffer; \ + \ + c0 = data[0]; \ + i = 1; \ +@@ -424,9 +424,9 @@ static int tight_fill_palette(VncState *vs, int x, int y, + { + int max; + +- max = count / tight_conf[vs->tight.compression].idx_max_colors_divisor; ++ max = count / tight_conf[vs->tight->compression].idx_max_colors_divisor; + if (max < 2 && +- count >= tight_conf[vs->tight.compression].mono_min_rect_size) { ++ count >= tight_conf[vs->tight->compression].mono_min_rect_size) { + max = 2; + } + if (max >= 256) { +@@ -559,7 +559,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h) + int x, y, c; + + buf32 = (uint32_t *)buf; +- memset(vs->tight.gradient.buffer, 0, w * 3 * sizeof(int)); ++ memset(vs->tight->gradient.buffer, 0, w * 3 * sizeof(int)); + + if (1 /* FIXME */) { + 
shift[0] = vs->client_pf.rshift; +@@ -576,7 +576,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h) + upper[c] = 0; + here[c] = 0; + } +- prev = (int *)vs->tight.gradient.buffer; ++ prev = (int *)vs->tight->gradient.buffer; + for (x = 0; x < w; x++) { + pix32 = *buf32++; + for (c = 0; c < 3; c++) { +@@ -616,7 +616,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h) + int prediction; \ + int x, y, c; \ + \ +- memset (vs->tight.gradient.buffer, 0, w * 3 * sizeof(int)); \ ++ memset(vs->tight->gradient.buffer, 0, w * 3 * sizeof(int)); \ + \ + endian = 0; /* FIXME */ \ + \ +@@ -632,7 +632,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h) + upper[c] = 0; \ + here[c] = 0; \ + } \ +- prev = (int *)vs->tight.gradient.buffer; \ ++ prev = (int *)vs->tight->gradient.buffer; \ + for (x = 0; x < w; x++) { \ + pix = *buf; \ + if (endian) { \ +@@ -786,7 +786,7 @@ static void extend_solid_area(VncState *vs, int x, int y, int w, int h, + static int tight_init_stream(VncState *vs, int stream_id, + int level, int strategy) + { +- z_streamp zstream = &vs->tight.stream[stream_id]; ++ z_streamp zstream = &vs->tight->stream[stream_id]; + + if (zstream->opaque == NULL) { + int err; +@@ -804,15 +804,15 @@ static int tight_init_stream(VncState *vs, int stream_id, + return -1; + } + +- vs->tight.levels[stream_id] = level; ++ vs->tight->levels[stream_id] = level; + zstream->opaque = vs; + } + +- if (vs->tight.levels[stream_id] != level) { ++ if (vs->tight->levels[stream_id] != level) { + if (deflateParams(zstream, level, strategy) != Z_OK) { + return -1; + } +- vs->tight.levels[stream_id] = level; ++ vs->tight->levels[stream_id] = level; + } + return 0; + } +@@ -840,11 +840,11 @@ static void tight_send_compact_size(VncState *vs, size_t len) + static int tight_compress_data(VncState *vs, int stream_id, size_t bytes, + int level, int strategy) + { +- z_streamp zstream = &vs->tight.stream[stream_id]; ++ z_streamp zstream = &vs->tight->stream[stream_id]; + int previous_out; + + if (bytes < VNC_TIGHT_MIN_TO_COMPRESS) { +- vnc_write(vs, vs->tight.tight.buffer, vs->tight.tight.offset); ++ vnc_write(vs, vs->tight->tight.buffer, vs->tight->tight.offset); + return bytes; + } + +@@ -853,13 +853,13 @@ static int tight_compress_data(VncState *vs, int stream_id, size_t bytes, + } + + /* reserve memory in output buffer */ +- buffer_reserve(&vs->tight.zlib, bytes + 64); ++ buffer_reserve(&vs->tight->zlib, bytes + 64); + + /* set pointers */ +- zstream->next_in = vs->tight.tight.buffer; +- zstream->avail_in = vs->tight.tight.offset; +- zstream->next_out = vs->tight.zlib.buffer + vs->tight.zlib.offset; +- zstream->avail_out = vs->tight.zlib.capacity - vs->tight.zlib.offset; ++ zstream->next_in = vs->tight->tight.buffer; ++ zstream->avail_in = vs->tight->tight.offset; ++ zstream->next_out = vs->tight->zlib.buffer + vs->tight->zlib.offset; ++ zstream->avail_out = vs->tight->zlib.capacity - vs->tight->zlib.offset; + previous_out = zstream->avail_out; + zstream->data_type = Z_BINARY; + +@@ -869,14 +869,14 @@ static int tight_compress_data(VncState *vs, int stream_id, size_t bytes, + return -1; + } + +- vs->tight.zlib.offset = vs->tight.zlib.capacity - zstream->avail_out; ++ vs->tight->zlib.offset = vs->tight->zlib.capacity - zstream->avail_out; + /* ...how much data has actually been produced by deflate() */ + bytes = previous_out - zstream->avail_out; + + tight_send_compact_size(vs, bytes); +- vnc_write(vs, vs->tight.zlib.buffer, bytes); ++ vnc_write(vs, vs->tight->zlib.buffer, 
bytes); + +- buffer_reset(&vs->tight.zlib); ++ buffer_reset(&vs->tight->zlib); + + return bytes; + } +@@ -927,16 +927,17 @@ static int send_full_color_rect(VncState *vs, int x, int y, int w, int h) + + vnc_write_u8(vs, stream << 4); /* no flushing, no filter */ + +- if (vs->tight.pixel24) { +- tight_pack24(vs, vs->tight.tight.buffer, w * h, &vs->tight.tight.offset); ++ if (vs->tight->pixel24) { ++ tight_pack24(vs, vs->tight->tight.buffer, w * h, ++ &vs->tight->tight.offset); + bytes = 3; + } else { + bytes = vs->client_pf.bytes_per_pixel; + } + + bytes = tight_compress_data(vs, stream, w * h * bytes, +- tight_conf[vs->tight.compression].raw_zlib_level, +- Z_DEFAULT_STRATEGY); ++ tight_conf[vs->tight->compression].raw_zlib_level, ++ Z_DEFAULT_STRATEGY); + + return (bytes >= 0); + } +@@ -947,14 +948,14 @@ static int send_solid_rect(VncState *vs) + + vnc_write_u8(vs, VNC_TIGHT_FILL << 4); /* no flushing, no filter */ + +- if (vs->tight.pixel24) { +- tight_pack24(vs, vs->tight.tight.buffer, 1, &vs->tight.tight.offset); ++ if (vs->tight->pixel24) { ++ tight_pack24(vs, vs->tight->tight.buffer, 1, &vs->tight->tight.offset); + bytes = 3; + } else { + bytes = vs->client_pf.bytes_per_pixel; + } + +- vnc_write(vs, vs->tight.tight.buffer, bytes); ++ vnc_write(vs, vs->tight->tight.buffer, bytes); + return 1; + } + +@@ -963,7 +964,7 @@ static int send_mono_rect(VncState *vs, int x, int y, + { + ssize_t bytes; + int stream = 1; +- int level = tight_conf[vs->tight.compression].mono_zlib_level; ++ int level = tight_conf[vs->tight->compression].mono_zlib_level; + + #ifdef CONFIG_VNC_PNG + if (tight_can_send_png_rect(vs, w, h)) { +@@ -991,26 +992,26 @@ static int send_mono_rect(VncState *vs, int x, int y, + uint32_t buf[2] = {bg, fg}; + size_t ret = sizeof (buf); + +- if (vs->tight.pixel24) { ++ if (vs->tight->pixel24) { + tight_pack24(vs, (unsigned char*)buf, 2, &ret); + } + vnc_write(vs, buf, ret); + +- tight_encode_mono_rect32(vs->tight.tight.buffer, w, h, bg, fg); ++ tight_encode_mono_rect32(vs->tight->tight.buffer, w, h, bg, fg); + break; + } + case 2: + vnc_write(vs, &bg, 2); + vnc_write(vs, &fg, 2); +- tight_encode_mono_rect16(vs->tight.tight.buffer, w, h, bg, fg); ++ tight_encode_mono_rect16(vs->tight->tight.buffer, w, h, bg, fg); + break; + default: + vnc_write_u8(vs, bg); + vnc_write_u8(vs, fg); +- tight_encode_mono_rect8(vs->tight.tight.buffer, w, h, bg, fg); ++ tight_encode_mono_rect8(vs->tight->tight.buffer, w, h, bg, fg); + break; + } +- vs->tight.tight.offset = bytes; ++ vs->tight->tight.offset = bytes; + + bytes = tight_compress_data(vs, stream, bytes, level, Z_DEFAULT_STRATEGY); + return (bytes >= 0); +@@ -1040,7 +1041,7 @@ static void write_palette(int idx, uint32_t color, void *opaque) + static bool send_gradient_rect(VncState *vs, int x, int y, int w, int h) + { + int stream = 3; +- int level = tight_conf[vs->tight.compression].gradient_zlib_level; ++ int level = tight_conf[vs->tight->compression].gradient_zlib_level; + ssize_t bytes; + + if (vs->client_pf.bytes_per_pixel == 1) { +@@ -1050,23 +1051,23 @@ static bool send_gradient_rect(VncState *vs, int x, int y, int w, int h) + vnc_write_u8(vs, (stream | VNC_TIGHT_EXPLICIT_FILTER) << 4); + vnc_write_u8(vs, VNC_TIGHT_FILTER_GRADIENT); + +- buffer_reserve(&vs->tight.gradient, w * 3 * sizeof (int)); ++ buffer_reserve(&vs->tight->gradient, w * 3 * sizeof(int)); + +- if (vs->tight.pixel24) { +- tight_filter_gradient24(vs, vs->tight.tight.buffer, w, h); ++ if (vs->tight->pixel24) { ++ tight_filter_gradient24(vs, vs->tight->tight.buffer, w, h); 
+ bytes = 3; + } else if (vs->client_pf.bytes_per_pixel == 4) { +- tight_filter_gradient32(vs, (uint32_t *)vs->tight.tight.buffer, w, h); ++ tight_filter_gradient32(vs, (uint32_t *)vs->tight->tight.buffer, w, h); + bytes = 4; + } else { +- tight_filter_gradient16(vs, (uint16_t *)vs->tight.tight.buffer, w, h); ++ tight_filter_gradient16(vs, (uint16_t *)vs->tight->tight.buffer, w, h); + bytes = 2; + } + +- buffer_reset(&vs->tight.gradient); ++ buffer_reset(&vs->tight->gradient); + + bytes = w * h * bytes; +- vs->tight.tight.offset = bytes; ++ vs->tight->tight.offset = bytes; + + bytes = tight_compress_data(vs, stream, bytes, + level, Z_FILTERED); +@@ -1077,7 +1078,7 @@ static int send_palette_rect(VncState *vs, int x, int y, + int w, int h, VncPalette *palette) + { + int stream = 2; +- int level = tight_conf[vs->tight.compression].idx_zlib_level; ++ int level = tight_conf[vs->tight->compression].idx_zlib_level; + int colors; + ssize_t bytes; + +@@ -1104,12 +1105,12 @@ static int send_palette_rect(VncState *vs, int x, int y, + palette_iter(palette, write_palette, &priv); + vnc_write(vs, header, sizeof(header)); + +- if (vs->tight.pixel24) { ++ if (vs->tight->pixel24) { + tight_pack24(vs, vs->output.buffer + old_offset, colors, &offset); + vs->output.offset = old_offset + offset; + } + +- tight_encode_indexed_rect32(vs->tight.tight.buffer, w * h, palette); ++ tight_encode_indexed_rect32(vs->tight->tight.buffer, w * h, palette); + break; + } + case 2: +@@ -1119,7 +1120,7 @@ static int send_palette_rect(VncState *vs, int x, int y, + + palette_iter(palette, write_palette, &priv); + vnc_write(vs, header, sizeof(header)); +- tight_encode_indexed_rect16(vs->tight.tight.buffer, w * h, palette); ++ tight_encode_indexed_rect16(vs->tight->tight.buffer, w * h, palette); + break; + } + default: +@@ -1127,7 +1128,7 @@ static int send_palette_rect(VncState *vs, int x, int y, + break; + } + bytes = w * h; +- vs->tight.tight.offset = bytes; ++ vs->tight->tight.offset = bytes; + + bytes = tight_compress_data(vs, stream, bytes, + level, Z_DEFAULT_STRATEGY); +@@ -1146,7 +1147,7 @@ static int send_palette_rect(VncState *vs, int x, int y, + static void jpeg_init_destination(j_compress_ptr cinfo) + { + VncState *vs = cinfo->client_data; +- Buffer *buffer = &vs->tight.jpeg; ++ Buffer *buffer = &vs->tight->jpeg; + + cinfo->dest->next_output_byte = (JOCTET *)buffer->buffer + buffer->offset; + cinfo->dest->free_in_buffer = (size_t)(buffer->capacity - buffer->offset); +@@ -1156,7 +1157,7 @@ static void jpeg_init_destination(j_compress_ptr cinfo) + static boolean jpeg_empty_output_buffer(j_compress_ptr cinfo) + { + VncState *vs = cinfo->client_data; +- Buffer *buffer = &vs->tight.jpeg; ++ Buffer *buffer = &vs->tight->jpeg; + + buffer->offset = buffer->capacity; + buffer_reserve(buffer, 2048); +@@ -1168,7 +1169,7 @@ static boolean jpeg_empty_output_buffer(j_compress_ptr cinfo) + static void jpeg_term_destination(j_compress_ptr cinfo) + { + VncState *vs = cinfo->client_data; +- Buffer *buffer = &vs->tight.jpeg; ++ Buffer *buffer = &vs->tight->jpeg; + + buffer->offset = buffer->capacity - cinfo->dest->free_in_buffer; + } +@@ -1187,7 +1188,7 @@ static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int quality) + return send_full_color_rect(vs, x, y, w, h); + } + +- buffer_reserve(&vs->tight.jpeg, 2048); ++ buffer_reserve(&vs->tight->jpeg, 2048); + + cinfo.err = jpeg_std_error(&jerr); + jpeg_create_compress(&cinfo); +@@ -1222,9 +1223,9 @@ static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int 
quality) + + vnc_write_u8(vs, VNC_TIGHT_JPEG << 4); + +- tight_send_compact_size(vs, vs->tight.jpeg.offset); +- vnc_write(vs, vs->tight.jpeg.buffer, vs->tight.jpeg.offset); +- buffer_reset(&vs->tight.jpeg); ++ tight_send_compact_size(vs, vs->tight->jpeg.offset); ++ vnc_write(vs, vs->tight->jpeg.buffer, vs->tight->jpeg.offset); ++ buffer_reset(&vs->tight->jpeg); + + return 1; + } +@@ -1240,7 +1241,7 @@ static void write_png_palette(int idx, uint32_t pix, void *opaque) + VncState *vs = priv->vs; + png_colorp color = &priv->png_palette[idx]; + +- if (vs->tight.pixel24) ++ if (vs->tight->pixel24) + { + color->red = (pix >> vs->client_pf.rshift) & vs->client_pf.rmax; + color->green = (pix >> vs->client_pf.gshift) & vs->client_pf.gmax; +@@ -1267,10 +1268,10 @@ static void png_write_data(png_structp png_ptr, png_bytep data, + { + VncState *vs = png_get_io_ptr(png_ptr); + +- buffer_reserve(&vs->tight.png, vs->tight.png.offset + length); +- memcpy(vs->tight.png.buffer + vs->tight.png.offset, data, length); ++ buffer_reserve(&vs->tight->png, vs->tight->png.offset + length); ++ memcpy(vs->tight->png.buffer + vs->tight->png.offset, data, length); + +- vs->tight.png.offset += length; ++ vs->tight->png.offset += length; + } + + static void png_flush_data(png_structp png_ptr) +@@ -1295,8 +1296,8 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h, + png_infop info_ptr; + png_colorp png_palette = NULL; + pixman_image_t *linebuf; +- int level = tight_png_conf[vs->tight.compression].png_zlib_level; +- int filters = tight_png_conf[vs->tight.compression].png_filters; ++ int level = tight_png_conf[vs->tight->compression].png_zlib_level; ++ int filters = tight_png_conf[vs->tight->compression].png_filters; + uint8_t *buf; + int dy; + +@@ -1340,21 +1341,23 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h, + png_set_PLTE(png_ptr, info_ptr, png_palette, palette_size(palette)); + + if (vs->client_pf.bytes_per_pixel == 4) { +- tight_encode_indexed_rect32(vs->tight.tight.buffer, w * h, palette); ++ tight_encode_indexed_rect32(vs->tight->tight.buffer, w * h, ++ palette); + } else { +- tight_encode_indexed_rect16(vs->tight.tight.buffer, w * h, palette); ++ tight_encode_indexed_rect16(vs->tight->tight.buffer, w * h, ++ palette); + } + } + + png_write_info(png_ptr, info_ptr); + +- buffer_reserve(&vs->tight.png, 2048); ++ buffer_reserve(&vs->tight->png, 2048); + linebuf = qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, w); + buf = (uint8_t *)pixman_image_get_data(linebuf); + for (dy = 0; dy < h; dy++) + { + if (color_type == PNG_COLOR_TYPE_PALETTE) { +- memcpy(buf, vs->tight.tight.buffer + (dy * w), w); ++ memcpy(buf, vs->tight->tight.buffer + (dy * w), w); + } else { + qemu_pixman_linebuf_fill(linebuf, vs->vd->server, w, x, y + dy); + } +@@ -1372,27 +1375,27 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h, + + vnc_write_u8(vs, VNC_TIGHT_PNG << 4); + +- tight_send_compact_size(vs, vs->tight.png.offset); +- vnc_write(vs, vs->tight.png.buffer, vs->tight.png.offset); +- buffer_reset(&vs->tight.png); ++ tight_send_compact_size(vs, vs->tight->png.offset); ++ vnc_write(vs, vs->tight->png.buffer, vs->tight->png.offset); ++ buffer_reset(&vs->tight->png); + return 1; + } + #endif /* CONFIG_VNC_PNG */ + + static void vnc_tight_start(VncState *vs) + { +- buffer_reset(&vs->tight.tight); ++ buffer_reset(&vs->tight->tight); + + // make the output buffer be the zlib buffer, so we can compress it later +- vs->tight.tmp = vs->output; +- vs->output = vs->tight.tight; ++ vs->tight->tmp = 
vs->output; ++ vs->output = vs->tight->tight; + } + + static void vnc_tight_stop(VncState *vs) + { + // switch back to normal output/zlib buffers +- vs->tight.tight = vs->output; +- vs->output = vs->tight.tmp; ++ vs->tight->tight = vs->output; ++ vs->output = vs->tight->tmp; + } + + static int send_sub_rect_nojpeg(VncState *vs, int x, int y, int w, int h, +@@ -1426,9 +1429,9 @@ static int send_sub_rect_jpeg(VncState *vs, int x, int y, int w, int h, + int ret; + + if (colors == 0) { +- if (force || (tight_jpeg_conf[vs->tight.quality].jpeg_full && ++ if (force || (tight_jpeg_conf[vs->tight->quality].jpeg_full && + tight_detect_smooth_image(vs, w, h))) { +- int quality = tight_conf[vs->tight.quality].jpeg_quality; ++ int quality = tight_conf[vs->tight->quality].jpeg_quality; + + ret = send_jpeg_rect(vs, x, y, w, h, quality); + } else { +@@ -1440,9 +1443,9 @@ static int send_sub_rect_jpeg(VncState *vs, int x, int y, int w, int h, + ret = send_mono_rect(vs, x, y, w, h, bg, fg); + } else if (colors <= 256) { + if (force || (colors > 96 && +- tight_jpeg_conf[vs->tight.quality].jpeg_idx && ++ tight_jpeg_conf[vs->tight->quality].jpeg_idx && + tight_detect_smooth_image(vs, w, h))) { +- int quality = tight_conf[vs->tight.quality].jpeg_quality; ++ int quality = tight_conf[vs->tight->quality].jpeg_quality; + + ret = send_jpeg_rect(vs, x, y, w, h, quality); + } else { +@@ -1480,20 +1483,20 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h) + qemu_thread_atexit_add(&vnc_tight_cleanup_notifier); + } + +- vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type); ++ vnc_framebuffer_update(vs, x, y, w, h, vs->tight->type); + + vnc_tight_start(vs); + vnc_raw_send_framebuffer_update(vs, x, y, w, h); + vnc_tight_stop(vs); + + #ifdef CONFIG_VNC_JPEG +- if (!vs->vd->non_adaptive && vs->tight.quality != (uint8_t)-1) { ++ if (!vs->vd->non_adaptive && vs->tight->quality != (uint8_t)-1) { + double freq = vnc_update_freq(vs, x, y, w, h); + +- if (freq < tight_jpeg_conf[vs->tight.quality].jpeg_freq_min) { ++ if (freq < tight_jpeg_conf[vs->tight->quality].jpeg_freq_min) { + allow_jpeg = false; + } +- if (freq >= tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) { ++ if (freq >= tight_jpeg_conf[vs->tight->quality].jpeg_freq_threshold) { + force_jpeg = true; + vnc_sent_lossy_rect(vs, x, y, w, h); + } +@@ -1503,7 +1506,7 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h) + colors = tight_fill_palette(vs, x, y, w * h, &bg, &fg, color_count_palette); + + #ifdef CONFIG_VNC_JPEG +- if (allow_jpeg && vs->tight.quality != (uint8_t)-1) { ++ if (allow_jpeg && vs->tight->quality != (uint8_t)-1) { + ret = send_sub_rect_jpeg(vs, x, y, w, h, bg, fg, colors, + color_count_palette, force_jpeg); + } else { +@@ -1520,7 +1523,7 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h) + + static int send_sub_rect_solid(VncState *vs, int x, int y, int w, int h) + { +- vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type); ++ vnc_framebuffer_update(vs, x, y, w, h, vs->tight->type); + + vnc_tight_start(vs); + vnc_raw_send_framebuffer_update(vs, x, y, w, h); +@@ -1538,8 +1541,8 @@ static int send_rect_simple(VncState *vs, int x, int y, int w, int h, + int rw, rh; + int n = 0; + +- max_size = tight_conf[vs->tight.compression].max_rect_size; +- max_width = tight_conf[vs->tight.compression].max_rect_width; ++ max_size = tight_conf[vs->tight->compression].max_rect_size; ++ max_width = tight_conf[vs->tight->compression].max_rect_width; + + if (split && (w > max_width || w * h > max_size)) { + 
max_sub_width = (w > max_width) ? max_width : w;
+@@ -1648,16 +1651,16 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+
+     if (vs->client_pf.bytes_per_pixel == 4 && vs->client_pf.rmax == 0xFF &&
+         vs->client_pf.bmax == 0xFF && vs->client_pf.gmax == 0xFF) {
+-        vs->tight.pixel24 = true;
++        vs->tight->pixel24 = true;
+     } else {
+-        vs->tight.pixel24 = false;
++        vs->tight->pixel24 = false;
+     }
+
+ #ifdef CONFIG_VNC_JPEG
+-    if (vs->tight.quality != (uint8_t)-1) {
++    if (vs->tight->quality != (uint8_t)-1) {
+         double freq = vnc_update_freq(vs, x, y, w, h);
+
+-        if (freq > tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) {
++        if (freq > tight_jpeg_conf[vs->tight->quality].jpeg_freq_threshold) {
+             return send_rect_simple(vs, x, y, w, h, false);
+         }
+     }
+@@ -1669,8 +1672,8 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+
+     /* Calculate maximum number of rows in one non-solid rectangle. */
+
+-    max_rows = tight_conf[vs->tight.compression].max_rect_size;
+-    max_rows /= MIN(tight_conf[vs->tight.compression].max_rect_width, w);
++    max_rows = tight_conf[vs->tight->compression].max_rect_size;
++    max_rows /= MIN(tight_conf[vs->tight->compression].max_rect_width, w);
+
+     return find_large_solid_color_rect(vs, x, y, w, h, max_rows);
+ }
+@@ -1678,33 +1681,33 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+ int vnc_tight_send_framebuffer_update(VncState *vs, int x, int y,
+                                       int w, int h)
+ {
+-    vs->tight.type = VNC_ENCODING_TIGHT;
++    vs->tight->type = VNC_ENCODING_TIGHT;
+     return tight_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ int vnc_tight_png_send_framebuffer_update(VncState *vs, int x, int y,
+                                           int w, int h)
+ {
+-    vs->tight.type = VNC_ENCODING_TIGHT_PNG;
++    vs->tight->type = VNC_ENCODING_TIGHT_PNG;
+     return tight_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ void vnc_tight_clear(VncState *vs)
+ {
+     int i;
+-    for (i=0; i<ARRAY_SIZE(vs->tight.stream); i++) {
+-        if (vs->tight.stream[i].opaque) {
+-            deflateEnd(&vs->tight.stream[i]);
++    for (i = 0; i < ARRAY_SIZE(vs->tight->stream); i++) {
++        if (vs->tight->stream[i].opaque) {
++            deflateEnd(&vs->tight->stream[i]);
+         }
+     }
+
+-    buffer_free(&vs->tight.tight);
+-    buffer_free(&vs->tight.zlib);
+-    buffer_free(&vs->tight.gradient);
++    buffer_free(&vs->tight->tight);
++    buffer_free(&vs->tight->zlib);
++    buffer_free(&vs->tight->gradient);
+ #ifdef CONFIG_VNC_JPEG
+-    buffer_free(&vs->tight.jpeg);
++    buffer_free(&vs->tight->jpeg);
+ #endif
+ #ifdef CONFIG_VNC_PNG
+-    buffer_free(&vs->tight.png);
++    buffer_free(&vs->tight->png);
+ #endif
+ }
+diff --git a/ui/vnc-enc-zlib.c b/ui/vnc-enc-zlib.c
+index 33e9df2..900ae5b 100644
+--- a/ui/vnc-enc-zlib.c
++++ b/ui/vnc-enc-zlib.c
+@@ -76,7 +76,8 @@ static int vnc_zlib_stop(VncState *vs)
+         zstream->zalloc = vnc_zlib_zalloc;
+         zstream->zfree = vnc_zlib_zfree;
+
+-        err = deflateInit2(zstream, vs->tight.compression, Z_DEFLATED, MAX_WBITS,
++        err = deflateInit2(zstream, vs->tight->compression, Z_DEFLATED,
++                           MAX_WBITS,
+                            MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
+
+         if (err != Z_OK) {
+@@ -84,16 +85,16 @@ static int vnc_zlib_stop(VncState *vs)
+             return -1;
+         }
+
+-        vs->zlib.level = vs->tight.compression;
++        vs->zlib.level = vs->tight->compression;
+         zstream->opaque = vs;
+     }
+
+-    if (vs->tight.compression != vs->zlib.level) {
+-        if (deflateParams(zstream, vs->tight.compression,
++    if (vs->tight->compression != vs->zlib.level) {
++        if (deflateParams(zstream, vs->tight->compression,
+                           Z_DEFAULT_STRATEGY) != Z_OK) {
+             return -1;
+         }
+-        vs->zlib.level = 
vs->tight.compression; ++ vs->zlib.level = vs->tight->compression; + } + + // reserve memory in output buffer +diff --git a/ui/vnc-enc-zrle-template.c b/ui/vnc-enc-zrle-template.c +index abf6b86..c107d8a 100644 +--- a/ui/vnc-enc-zrle-template.c ++++ b/ui/vnc-enc-zrle-template.c +@@ -96,7 +96,7 @@ static void ZRLE_ENCODE(VncState *vs, int x, int y, int w, int h, + static void ZRLE_ENCODE_TILE(VncState *vs, ZRLE_PIXEL *data, int w, int h, + int zywrle_level) + { +- VncPalette *palette = &vs->zrle.palette; ++ VncPalette *palette = &vs->zrle->palette; + + int runs = 0; + int single_pixels = 0; +diff --git a/ui/vnc-enc-zrle.c b/ui/vnc-enc-zrle.c +index fd63d4f..3d259ed 100644 +--- a/ui/vnc-enc-zrle.c ++++ b/ui/vnc-enc-zrle.c +@@ -37,18 +37,18 @@ static const int bits_per_packed_pixel[] = { + + static void vnc_zrle_start(VncState *vs) + { +- buffer_reset(&vs->zrle.zrle); ++ buffer_reset(&vs->zrle->zrle); + + /* make the output buffer be the zlib buffer, so we can compress it later */ +- vs->zrle.tmp = vs->output; +- vs->output = vs->zrle.zrle; ++ vs->zrle->tmp = vs->output; ++ vs->output = vs->zrle->zrle; + } + + static void vnc_zrle_stop(VncState *vs) + { + /* switch back to normal output/zlib buffers */ +- vs->zrle.zrle = vs->output; +- vs->output = vs->zrle.tmp; ++ vs->zrle->zrle = vs->output; ++ vs->output = vs->zrle->tmp; + } + + static void *zrle_convert_fb(VncState *vs, int x, int y, int w, int h, +@@ -56,24 +56,24 @@ static void *zrle_convert_fb(VncState *vs, int x, int y, int w, int h, + { + Buffer tmp; + +- buffer_reset(&vs->zrle.fb); +- buffer_reserve(&vs->zrle.fb, w * h * bpp + bpp); ++ buffer_reset(&vs->zrle->fb); ++ buffer_reserve(&vs->zrle->fb, w * h * bpp + bpp); + + tmp = vs->output; +- vs->output = vs->zrle.fb; ++ vs->output = vs->zrle->fb; + + vnc_raw_send_framebuffer_update(vs, x, y, w, h); + +- vs->zrle.fb = vs->output; ++ vs->zrle->fb = vs->output; + vs->output = tmp; +- return vs->zrle.fb.buffer; ++ return vs->zrle->fb.buffer; + } + + static int zrle_compress_data(VncState *vs, int level) + { +- z_streamp zstream = &vs->zrle.stream; ++ z_streamp zstream = &vs->zrle->stream; + +- buffer_reset(&vs->zrle.zlib); ++ buffer_reset(&vs->zrle->zlib); + + if (zstream->opaque != vs) { + int err; +@@ -93,13 +93,13 @@ static int zrle_compress_data(VncState *vs, int level) + } + + /* reserve memory in output buffer */ +- buffer_reserve(&vs->zrle.zlib, vs->zrle.zrle.offset + 64); ++ buffer_reserve(&vs->zrle->zlib, vs->zrle->zrle.offset + 64); + + /* set pointers */ +- zstream->next_in = vs->zrle.zrle.buffer; +- zstream->avail_in = vs->zrle.zrle.offset; +- zstream->next_out = vs->zrle.zlib.buffer + vs->zrle.zlib.offset; +- zstream->avail_out = vs->zrle.zlib.capacity - vs->zrle.zlib.offset; ++ zstream->next_in = vs->zrle->zrle.buffer; ++ zstream->avail_in = vs->zrle->zrle.offset; ++ zstream->next_out = vs->zrle->zlib.buffer + vs->zrle->zlib.offset; ++ zstream->avail_out = vs->zrle->zlib.capacity - vs->zrle->zlib.offset; + zstream->data_type = Z_BINARY; + + /* start encoding */ +@@ -108,8 +108,8 @@ static int zrle_compress_data(VncState *vs, int level) + return -1; + } + +- vs->zrle.zlib.offset = vs->zrle.zlib.capacity - zstream->avail_out; +- return vs->zrle.zlib.offset; ++ vs->zrle->zlib.offset = vs->zrle->zlib.capacity - zstream->avail_out; ++ return vs->zrle->zlib.offset; + } + + /* Try to work out whether to use RLE and/or a palette. 
We do this by +@@ -259,14 +259,14 @@ static int zrle_send_framebuffer_update(VncState *vs, int x, int y, + size_t bytes; + int zywrle_level; + +- if (vs->zrle.type == VNC_ENCODING_ZYWRLE) { +- if (!vs->vd->lossy || vs->tight.quality == (uint8_t)-1 +- || vs->tight.quality == 9) { ++ if (vs->zrle->type == VNC_ENCODING_ZYWRLE) { ++ if (!vs->vd->lossy || vs->tight->quality == (uint8_t)-1 ++ || vs->tight->quality == 9) { + zywrle_level = 0; +- vs->zrle.type = VNC_ENCODING_ZRLE; +- } else if (vs->tight.quality < 3) { ++ vs->zrle->type = VNC_ENCODING_ZRLE; ++ } else if (vs->tight->quality < 3) { + zywrle_level = 3; +- } else if (vs->tight.quality < 6) { ++ } else if (vs->tight->quality < 6) { + zywrle_level = 2; + } else { + zywrle_level = 1; +@@ -337,30 +337,30 @@ static int zrle_send_framebuffer_update(VncState *vs, int x, int y, + + vnc_zrle_stop(vs); + bytes = zrle_compress_data(vs, Z_DEFAULT_COMPRESSION); +- vnc_framebuffer_update(vs, x, y, w, h, vs->zrle.type); ++ vnc_framebuffer_update(vs, x, y, w, h, vs->zrle->type); + vnc_write_u32(vs, bytes); +- vnc_write(vs, vs->zrle.zlib.buffer, vs->zrle.zlib.offset); ++ vnc_write(vs, vs->zrle->zlib.buffer, vs->zrle->zlib.offset); + return 1; + } + + int vnc_zrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h) + { +- vs->zrle.type = VNC_ENCODING_ZRLE; ++ vs->zrle->type = VNC_ENCODING_ZRLE; + return zrle_send_framebuffer_update(vs, x, y, w, h); + } + + int vnc_zywrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h) + { +- vs->zrle.type = VNC_ENCODING_ZYWRLE; ++ vs->zrle->type = VNC_ENCODING_ZYWRLE; + return zrle_send_framebuffer_update(vs, x, y, w, h); + } + + void vnc_zrle_clear(VncState *vs) + { +- if (vs->zrle.stream.opaque) { +- deflateEnd(&vs->zrle.stream); ++ if (vs->zrle->stream.opaque) { ++ deflateEnd(&vs->zrle->stream); + } +- buffer_free(&vs->zrle.zrle); +- buffer_free(&vs->zrle.fb); +- buffer_free(&vs->zrle.zlib); ++ buffer_free(&vs->zrle->zrle); ++ buffer_free(&vs->zrle->fb); ++ buffer_free(&vs->zrle->zlib); + } +diff --git a/ui/vnc.c b/ui/vnc.c +index fbd0da1..821e664 100644 +--- a/ui/vnc.c ++++ b/ui/vnc.c +@@ -1278,6 +1278,8 @@ void vnc_disconnect_finish(VncState *vs) + object_unref(OBJECT(vs->sioc)); + vs->sioc = NULL; + vs->magic = 0; ++ g_free(vs->zrle); ++ g_free(vs->tight); + g_free(vs); + } + +@@ -2087,8 +2089,8 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) + + vs->features = 0; + vs->vnc_encoding = 0; +- vs->tight.compression = 9; +- vs->tight.quality = -1; /* Lossless by default */ ++ vs->tight->compression = 9; ++ vs->tight->quality = -1; /* Lossless by default */ + vs->absolute = -1; + + /* +@@ -2156,11 +2158,11 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) + vs->features |= VNC_FEATURE_LED_STATE_MASK; + break; + case VNC_ENCODING_COMPRESSLEVEL0 ... VNC_ENCODING_COMPRESSLEVEL0 + 9: +- vs->tight.compression = (enc & 0x0F); ++ vs->tight->compression = (enc & 0x0F); + break; + case VNC_ENCODING_QUALITYLEVEL0 ... 
VNC_ENCODING_QUALITYLEVEL0 + 9: + if (vs->vd->lossy) { +- vs->tight.quality = (enc & 0x0F); ++ vs->tight->quality = (enc & 0x0F); + } + break; + default: +@@ -3070,6 +3072,8 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, + int i; + + trace_vnc_client_connect(vs, sioc); ++ vs->zrle = g_new0(VncZrle, 1); ++ vs->tight = g_new0(VncTight, 1); + vs->magic = VNC_MAGIC; + vs->sioc = sioc; + object_ref(OBJECT(vs->sioc)); +@@ -3081,19 +3085,19 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, + buffer_init(&vs->output, "vnc-output/%p", sioc); + buffer_init(&vs->jobs_buffer, "vnc-jobs_buffer/%p", sioc); + +- buffer_init(&vs->tight.tight, "vnc-tight/%p", sioc); +- buffer_init(&vs->tight.zlib, "vnc-tight-zlib/%p", sioc); +- buffer_init(&vs->tight.gradient, "vnc-tight-gradient/%p", sioc); ++ buffer_init(&vs->tight->tight, "vnc-tight/%p", sioc); ++ buffer_init(&vs->tight->zlib, "vnc-tight-zlib/%p", sioc); ++ buffer_init(&vs->tight->gradient, "vnc-tight-gradient/%p", sioc); + #ifdef CONFIG_VNC_JPEG +- buffer_init(&vs->tight.jpeg, "vnc-tight-jpeg/%p", sioc); ++ buffer_init(&vs->tight->jpeg, "vnc-tight-jpeg/%p", sioc); + #endif + #ifdef CONFIG_VNC_PNG +- buffer_init(&vs->tight.png, "vnc-tight-png/%p", sioc); ++ buffer_init(&vs->tight->png, "vnc-tight-png/%p", sioc); + #endif + buffer_init(&vs->zlib.zlib, "vnc-zlib/%p", sioc); +- buffer_init(&vs->zrle.zrle, "vnc-zrle/%p", sioc); +- buffer_init(&vs->zrle.fb, "vnc-zrle-fb/%p", sioc); +- buffer_init(&vs->zrle.zlib, "vnc-zrle-zlib/%p", sioc); ++ buffer_init(&vs->zrle->zrle, "vnc-zrle/%p", sioc); ++ buffer_init(&vs->zrle->fb, "vnc-zrle-fb/%p", sioc); ++ buffer_init(&vs->zrle->zlib, "vnc-zrle-zlib/%p", sioc); + + if (skipauth) { + vs->auth = VNC_AUTH_NONE; +diff --git a/ui/vnc.h b/ui/vnc.h +index 7626329..8d9687c 100644 +--- a/ui/vnc.h ++++ b/ui/vnc.h +@@ -335,10 +335,10 @@ struct VncState + /* Encoding specific, if you add something here, don't forget to + * update vnc_async_encoding_start() + */ +- VncTight tight; ++ VncTight *tight; + VncZlib zlib; + VncHextile hextile; +- VncZrle zrle; ++ VncZrle *zrle; + VncZywrle zywrle; + + Notifier mouse_mode_notifier; +-- +1.8.3.1 + diff --git a/SPECS/qemu-kvm.spec b/SPECS/qemu-kvm.spec index 1f4cbc8..c584a93 100644 --- a/SPECS/qemu-kvm.spec +++ b/SPECS/qemu-kvm.spec @@ -108,7 +108,7 @@ Obsoletes: %1%{rhel_ma_suffix} < %{obsoletes_version2} \ Summary: QEMU is a machine emulator and virtualizer Name: %{pkgname}%{?pkgsuffix} Version: 2.12.0 -Release: 44%{?dist}.1 +Release: 48%{?dist} # Epoch because we pushed a qemu-1.0 package. 
AIUI this can't ever be dropped Epoch: 10 License: GPLv2 and GPLv2+ and CC-BY @@ -1960,6 +1960,38 @@ Patch898: kvm-iscsi-Cap-block-count-from-GET-LBA-STATUS-CVE-2020-1.patch Patch899: kvm-util-add-slirp_fmt-helpers.patch # For bz#1798974 - CVE-2020-8608 qemu-kvm-ma: QEMU: Slirp: potential OOB access due to unsafe snprintf() usages [rhel-7.8.z] Patch900: kvm-tcp_emu-fix-unsafe-snprintf-usages.patch +# For bz#1802216 - Add support for newer glusterfs +Patch901: kvm-gluster-Handle-changed-glfs_ftruncate-signature.patch +# For bz#1802216 - Add support for newer glusterfs +Patch902: kvm-gluster-the-glfs_io_cbk-callback-function-pointer-ad.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch903: kvm-target-i386-kvm-initialize-feature-MSRs-very-early.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch904: kvm-target-i386-add-a-ucode-rev-property.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch905: kvm-target-i386-kvm-initialize-microcode-revision-from-K.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch906: kvm-target-i386-fix-TCG-UCODE_REV-access.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch907: kvm-target-i386-check-for-availability-of-MSR_IA32_UCODE.patch +# For bz#1791653 - Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8 +Patch908: kvm-target-i386-enable-monitor-and-ucode-revision-with-c.patch +# For bz#1721403 - After hot unplug virtio-net and vfio nic, hot plug vfio-pci device fails in Win2019 guest +Patch909: kvm-clean-up-callback-when-del-virtqueue.patch +# For bz#1721403 - After hot unplug virtio-net and vfio nic, hot plug vfio-pci device fails in Win2019 guest +Patch910: kvm-virtio-add-ability-to-delete-vq-through-a-pointer.patch +# For bz#1721403 - After hot unplug virtio-net and vfio nic, hot plug vfio-pci device fails in Win2019 guest +Patch911: kvm-virtio-reset-region-cache-when-on-queue-deletion.patch +# For bz#1721403 - After hot unplug virtio-net and vfio nic, hot plug vfio-pci device fails in Win2019 guest +Patch912: kvm-virtio-net-delete-also-control-queue-when-TX-RX-dele.patch +# For bz#1622976 - CVE-2018-15746 qemu-kvm-ma: Qemu: seccomp: blacklist is not applied to all threads [rhel-7] +Patch913: kvm-seccomp-set-the-seccomp-filter-to-all-threads.patch +# For bz#1819253 - Fix overzealous I/O request splitting performance regression +Patch914: kvm-file-posix-Use-max-transfer-length-segment-count-onl.patch +# For bz#1810409 - CVE-2019-20382 qemu-kvm-rhev: QEMU: vnc: memory leakage upon disconnect [rhel-7] +Patch915: kvm-vnc-add-magic-cookie-to-VncState.patch +# For bz#1810409 - CVE-2019-20382 qemu-kvm-rhev: QEMU: vnc: memory leakage upon disconnect [rhel-7] +Patch916: kvm-vnc-fix-memory-leak-when-vnc-disconnect.patch BuildRequires: zlib-devel BuildRequires: glib2-devel @@ -3077,6 +3109,22 @@ ApplyOptionalPatch() %patch898 -p1 %patch899 -p1 %patch900 -p1 +%patch901 -p1 +%patch902 -p1 +%patch903 -p1 +%patch904 -p1 +%patch905 -p1 +%patch906 -p1 +%patch907 -p1 +%patch908 -p1 +%patch909 -p1 +%patch910 -p1 +%patch911 -p1 +%patch912 -p1 +%patch913 -p1 +%patch914 -p1 +%patch915 -p1 +%patch916 -p1 # Fix 
executable permission for iotests chmod 755 $(ls tests/qemu-iotests/???) @@ -3597,7 +3645,43 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %endif %changelog -* Wed Mar 04 2020 Miroslav Rezanina - ma-2.12.0-44.el7_8.1 +* Thu May 28 2020 Miroslav Rezanina - 2.12.0-48.el7 +- kvm-vnc-add-magic-cookie-to-VncState.patch [bz#1810409] +- kvm-vnc-fix-memory-leak-when-vnc-disconnect.patch [bz#1810409] +- Resolves: bz#1810409 + (CVE-2019-20382 qemu-kvm-rhev: QEMU: vnc: memory leakage upon disconnect [rhel-7]) + +* Mon Apr 27 2020 Miroslav Rezanina - 2.12.0-47.el7 +- kvm-file-posix-Use-max-transfer-length-segment-count-onl.patch [bz#1819253] +- Resolves: bz#1819253 + (Fix overzealous I/O request splitting performance regression) + +* Tue Apr 07 2020 Miroslav Rezanina - 2.12.0-46.el7 +- kvm-seccomp-set-the-seccomp-filter-to-all-threads.patch [bz#1622976] +- Resolves: bz#1622976 + (CVE-2018-15746 qemu-kvm-ma: Qemu: seccomp: blacklist is not applied to all threads [rhel-7]) + +* Tue Mar 17 2020 Jon Maloy - 2.12.0-45.el7 +- kvm-gluster-Handle-changed-glfs_ftruncate-signature.patch [bz#1802216] +- kvm-gluster-the-glfs_io_cbk-callback-function-pointer-ad.patch [bz#1802216] +- kvm-target-i386-kvm-initialize-feature-MSRs-very-early.patch [bz#1791653] +- kvm-target-i386-add-a-ucode-rev-property.patch [bz#1791653] +- kvm-target-i386-kvm-initialize-microcode-revision-from-K.patch [bz#1791653] +- kvm-target-i386-fix-TCG-UCODE_REV-access.patch [bz#1791653] +- kvm-target-i386-check-for-availability-of-MSR_IA32_UCODE.patch [bz#1791653] +- kvm-target-i386-enable-monitor-and-ucode-revision-with-c.patch [bz#1791653] +- kvm-clean-up-callback-when-del-virtqueue.patch [bz#1721403] +- kvm-virtio-add-ability-to-delete-vq-through-a-pointer.patch [bz#1721403] +- kvm-virtio-reset-region-cache-when-on-queue-deletion.patch [bz#1721403] +- kvm-virtio-net-delete-also-control-queue-when-TX-RX-dele.patch [bz#1721403] +- Resolves: bz#1721403 + (After hot unplug virtio-net and vfio nic, hot plug vfio-pci device fails in Win2019 guest) +- Resolves: bz#1791653 + (Backport: Passthrough host CPU microcode version to KVM guest if using CPU passthrough to RHEL 7.7/7.8) +- Resolves: bz#1802216 + (Add support for newer glusterfs) + +* Wed Mar 04 2020 Miroslav Rezanina - 2.12.0-44.el7_8.1 - kvm-util-add-slirp_fmt-helpers.patch [bz#1798974] - kvm-tcp_emu-fix-unsafe-snprintf-usages.patch [bz#1798974] - Resolves: bz#1798974 @@ -5268,7 +5352,7 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ - Resolves: bz#1517051 (POWER9 - Virt: QEMU: Migration of HPT guest on Radix host fails) -* Tue Dec 05 2017 Miroslav Rezanina - ma-2.10.0-11.el7 +* Tue Dec 05 2017 Miroslav Rezanina - 2.10.0-11.el7 - kvm-qcow2-don-t-permit-changing-encryption-parameters.patch [bz#1406803] - kvm-qcow2-fix-image-corruption-after-committing-qcow2-im.patch [bz#1406803] - kvm-qemu-doc-Add-UUID-support-in-initiator-name.patch [bz#1494210]