-rw-r--r--meta/classes/cve-check.bbclass31
-rw-r--r--meta/classes/kernel-fitimage.bbclass6
-rw-r--r--meta/classes/kernel-yocto.bbclass3
-rw-r--r--meta/classes/kernel.bbclass2
-rw-r--r--meta/classes/kernelsrc.bbclass2
-rw-r--r--meta/classes/license.bbclass27
-rw-r--r--meta/classes/linuxloader.bbclass2
-rw-r--r--meta/classes/patch.bbclass7
-rw-r--r--meta/classes/pypi.bbclass4
-rw-r--r--meta/classes/relocatable.bbclass20
-rw-r--r--meta/classes/reproducible_build.bbclass40
-rw-r--r--meta/classes/sanity.bbclass12
-rw-r--r--meta/conf/distro/include/maintainers.inc1
-rw-r--r--meta/conf/distro/include/security_flags.inc2
-rw-r--r--meta/conf/distro/include/yocto-uninative.inc10
-rw-r--r--meta/conf/documentation.conf1
-rw-r--r--meta/conf/multilib.conf1
-rw-r--r--meta/files/toolchain-shar-extract.sh13
-rw-r--r--meta/lib/oe/package_manager.py37
-rw-r--r--meta/lib/oe/prservice.py4
-rw-r--r--meta/lib/oe/sstatesig.py30
-rw-r--r--meta/lib/oe/utils.py2
-rw-r--r--meta/lib/oeqa/core/utils/concurrencytest.py2
-rw-r--r--meta/lib/oeqa/sdkext/testsdk.py7
-rw-r--r--meta/lib/oeqa/selftest/cases/incompatible_lic.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/reproducible.py9
-rw-r--r--meta/lib/oeqa/selftest/cases/runtime_test.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/signing.py4
-rw-r--r--meta/lib/oeqa/selftest/cases/sstatetests.py40
-rw-r--r--meta/lib/oeqa/selftest/context.py6
-rw-r--r--meta/lib/oeqa/targetcontrol.py7
-rw-r--r--meta/lib/oeqa/utils/qemurunner.py16
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-tools.inc65
-rw-r--r--meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb67
-rw-r--r--meta/recipes-bsp/u-boot/u-boot.inc2
-rw-r--r--meta/recipes-connectivity/avahi/avahi.inc5
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch64
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch18
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch60
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch22
-rw-r--r--meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch31
-rw-r--r--meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch670
-rw-r--r--meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch278
-rw-r--r--meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch512
-rw-r--r--meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch911
-rw-r--r--meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch80
-rw-r--r--meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch140
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2020-8622.patch60
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2020-8623.patch402
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2020-8624.patch33
-rw-r--r--meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch15
-rw-r--r--meta/recipes-connectivity/bind/bind_9.11.19.bb (renamed from meta/recipes-connectivity/bind/bind_9.11.5-P4.bb)22
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5.inc2
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-1.patch35
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-2.patch143
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/0001-Ensure-context-is-running-prior-to-calling-isc_app_c.patch165
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/0002-Added-shutdown-log-statment-to-dhcrelay.patch29
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/0003-Addressed-review-comment.patch31
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp_4.4.1.bb3
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb6
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Disable-statx-if-using-glibc-emulation.patch34
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils/0001-statd-take-user-id-from-var-lib-nfs-sm.patch102
-rw-r--r--meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb2
-rw-r--r--meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch46
-rw-r--r--meta/recipes-connectivity/openssh/openssh_8.0p1.bb1
-rw-r--r--meta/recipes-connectivity/openssl/openssl/CVE-2019-1551.patch758
-rw-r--r--meta/recipes-connectivity/openssl/openssl/reproducible.patch32
-rw-r--r--meta/recipes-connectivity/openssl/openssl_1.1.1g.bb (renamed from meta/recipes-connectivity/openssl/openssl_1.1.1d.bb)7
-rw-r--r--meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch47
-rw-r--r--meta/recipes-connectivity/ppp/ppp_2.4.7.bb1
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch151
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch62
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch50
-rw-r--r--meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb11
-rw-r--r--meta/recipes-core/busybox/busybox.inc43
-rw-r--r--meta/recipes-core/dbus/dbus/CVE-2020-12049.patch78
-rw-r--r--meta/recipes-core/dbus/dbus_1.12.16.bb1
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/0020-meson.build-do-not-hardcode-linux-as-the-host-system.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-6750.patch741
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0_2.60.7.bb2
-rw-r--r--meta/recipes-core/glibc/glibc-testsuite_2.30.bb3
-rw-r--r--meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Make-relocatable-install-for-locales.patch35
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-10029.patch128
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-1751.patch70
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-1752.patch66
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch193
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch111
-rw-r--r--meta/recipes-core/glibc/glibc_2.30.bb5
-rw-r--r--meta/recipes-core/images/build-appliance-image_15.0.0.bb2
-rw-r--r--meta/recipes-core/kbd/kbd/0001-configure.ac-Fix-logic-of-vlock-configure-switch.patch31
-rw-r--r--meta/recipes-core/kbd/kbd_2.0.4.bb4
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch37
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch41
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch36
-rw-r--r--meta/recipes-core/libxml/libxml2/Fix-CVE-2019-19956.patch40
-rw-r--r--meta/recipes-core/libxml/libxml2_2.9.9.bb4
-rw-r--r--meta/recipes-core/meta/buildtools-extended-tarball.bb36
-rw-r--r--meta/recipes-core/meta/buildtools-tarball.bb7
-rw-r--r--meta/recipes-core/meta/cve-update-db-native.bb24
-rw-r--r--meta/recipes-core/meta/dummy-sdk-package.inc3
-rw-r--r--meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb8
-rw-r--r--meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb5
-rw-r--r--meta/recipes-core/meta/target-sdk-provides-dummy.bb1
-rw-r--r--meta/recipes-core/ncurses/ncurses.inc1
-rw-r--r--meta/recipes-core/ncurses/ncurses_6.1+20190803.bb2
-rw-r--r--meta/recipes-core/systemd/systemd/0001-Merge-branch-polkit-ref-count.patch520
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2020-13776.patch96
-rw-r--r--meta/recipes-core/systemd/systemd_243.2.bb2
-rw-r--r--meta/recipes-core/sysvinit/sysvinit_2.88dsf.bb1
-rw-r--r--meta/recipes-devtools/apt/files/apt.conf2
-rw-r--r--meta/recipes-devtools/binutils/binutils-2.32.inc1
-rw-r--r--meta/recipes-devtools/binutils/binutils/0001-Fix-a-missing-include-of-string.patch49
-rw-r--r--meta/recipes-devtools/binutils/binutils/nativesdk-relocation.patch80
-rw-r--r--meta/recipes-devtools/binutils/binutils_2.32.bb5
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch49
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch57
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch76
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb3
-rw-r--r--meta/recipes-devtools/file/file_5.37.bb2
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2.inc1
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.2/re-PR-target-91102-aarch64-ICE-on-Linux-kernel-with-.patch95
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross-canadian.inc4
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross.inc7
-rw-r--r--meta/recipes-devtools/gcc/gcc-runtime.inc4
-rw-r--r--meta/recipes-devtools/gcc/gcc-target.inc8
-rw-r--r--meta/recipes-devtools/git/git.inc16
-rw-r--r--meta/recipes-devtools/git/git/0001-t-lib-credential-use-test_i18ncmp-to-check-stderr.patch35
-rw-r--r--meta/recipes-devtools/git/git/0002-credential-detect-unrepresentable-values-when-parsin.patch156
-rw-r--r--meta/recipes-devtools/git/git/0003-fsck-detect-gitmodules-URLs-with-embedded-newlines.patch103
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-1.patch70
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-2.patch292
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-3.patch97
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-4.patch173
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-5.patch211
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-6.patch84
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-7.patch206
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-8.patch114
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-11008-9.patch114
-rw-r--r--meta/recipes-devtools/git/git/CVE-2020-5260.patch65
-rw-r--r--meta/recipes-devtools/go/go-1.12.inc4
-rw-r--r--meta/recipes-devtools/go/go-1.12/0001-net-http-cgi-rename-a-test-file-to-be-less-cute.patch28
-rw-r--r--meta/recipes-devtools/go/go-1.12/CVE-2020-15586.patch131
-rw-r--r--meta/recipes-devtools/go/go-1.12/CVE-2020-16845.patch110
-rw-r--r--meta/recipes-devtools/go/go-1.12/CVE-2020-24553.patch429
-rw-r--r--meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch62
-rw-r--r--meta/recipes-devtools/mtd/mtd-utils_git.bb1
-rw-r--r--meta/recipes-devtools/nasm/nasm/0001-BR3392712-pp_tokline-fix-double-free.patch36
-rw-r--r--meta/recipes-devtools/nasm/nasm_2.14.02.bb1
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils/0001-Switch-all-scripts-to-use-Python-3.x.patch113
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils/0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch44
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils/fix-reproducibility.patch32
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils/pipefail.patch31
-rw-r--r--meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb (renamed from meta/recipes-devtools/opkg-utils/opkg-utils_0.4.1.bb)13
-rw-r--r--meta/recipes-devtools/patch/patch_2.7.6.bb3
-rw-r--r--meta/recipes-devtools/patchelf/patchelf/fix-phdrs.patch37
-rw-r--r--meta/recipes-devtools/patchelf/patchelf_0.10.bb1
-rw-r--r--meta/recipes-devtools/perl/files/0001-tests-adjust-to-correctly-exclude-unbuilt-extensions.patch27
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2020-10543.patch36
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2020-10878_1.patch152
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2020-10878_2.patch36
-rw-r--r--meta/recipes-devtools/perl/files/determinism.patch81
-rw-r--r--meta/recipes-devtools/perl/files/encodefix.patch20
-rw-r--r--meta/recipes-devtools/perl/files/fix-setgroup.patch49
-rw-r--r--meta/recipes-devtools/perl/files/perl-configpm-switch.patch4
-rw-r--r--meta/recipes-devtools/perl/files/racefix.patch24
-rw-r--r--meta/recipes-devtools/perl/liberror-perl_0.17029.bb (renamed from meta/recipes-devtools/perl/liberror-perl_0.17028.bb)4
-rw-r--r--meta/recipes-devtools/perl/libmodule-build-perl/run-ptest2
-rw-r--r--meta/recipes-devtools/perl/libmodule-build-perl_0.4229.bb3
-rw-r--r--meta/recipes-devtools/perl/perl-ptest.inc3
-rw-r--r--meta/recipes-devtools/perl/perl_5.30.1.bb (renamed from meta/recipes-devtools/perl/perl_5.30.0.bb)40
-rw-r--r--meta/recipes-devtools/pseudo/pseudo.inc2
-rw-r--r--meta/recipes-devtools/python-numpy/files/aarch64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/aarch64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/arm/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/arm/numpyconfig.h17
-rw-r--r--meta/recipes-devtools/python-numpy/files/armeb/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/armeb/numpyconfig.h17
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32eb/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32el/_numpyconfig.h31
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn32el/config.h138
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64eb/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64el/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarchn64el/config.h138
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32eb/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32eb/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32el/config.h21
-rw-r--r--meta/recipes-devtools/python-numpy/files/mipsarcho32el/numpyconfig.h18
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/powerpc64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/riscv64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/riscv64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86-64/_numpyconfig.h32
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86-64/config.h139
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86/config.h108
-rw-r--r--meta/recipes-devtools/python-numpy/files/x86/numpyconfig.h24
-rw-r--r--meta/recipes-devtools/python-numpy/python-numpy.inc68
-rw-r--r--meta/recipes-devtools/python/python-native_2.7.18.bb (renamed from meta/recipes-devtools/python/python-native_2.7.17.bb)0
-rw-r--r--meta/recipes-devtools/python/python.inc6
-rw-r--r--meta/recipes-devtools/python/python/python2-manifest.json1
-rw-r--r--meta/recipes-devtools/python/python3-testtools/no_traceback2.patch23
-rw-r--r--meta/recipes-devtools/python/python3-testtools_2.3.0.bb2
-rw-r--r--meta/recipes-devtools/python/python3/0001-Don-t-search-system-for-headers-libraries.patch29
-rw-r--r--meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch31
-rw-r--r--meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch2
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2020-14422.patch79
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2020-26116.patch106
-rw-r--r--meta/recipes-devtools/python/python3_3.7.8.bb (renamed from meta/recipes-devtools/python/python3_3.7.6.bb)28
-rw-r--r--meta/recipes-devtools/python/python_2.7.18.bb (renamed from meta/recipes-devtools/python/python_2.7.17.bb)0
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc18
-rw-r--r--meta/recipes-devtools/qemu/qemu/0011-linux-user-remove-host-stime-syscall.patch61
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2019-20382.patch1018
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-10702.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-10756.patch40
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-11869.patch97
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-12829.patch267
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13765.patch48
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-14364.patch93
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15863.patch64
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-16092.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-1711.patch64
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-7039-1.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-7039-2.patch59
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-7039-3.patch64
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-7211.patch46
-rw-r--r--meta/recipes-devtools/rsync/rsync_3.1.3.bb3
-rw-r--r--meta/recipes-devtools/ruby/ruby/fix-CVE-2019-16254.patch106
-rw-r--r--meta/recipes-devtools/ruby/ruby_2.5.5.bb1
-rw-r--r--meta/recipes-devtools/strace/strace/Makefile-ptest.patch2
-rwxr-xr-xmeta/recipes-devtools/strace/strace/run-ptest7
-rw-r--r--meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch316
-rw-r--r--meta/recipes-extended/cpio/cpio_2.12.bb1
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2019-10216.patch53
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript_9.27.bb1
-rw-r--r--meta/recipes-extended/iputils/iputils_s20190709.bb3
-rw-r--r--meta/recipes-extended/libarchive/libarchive/0001-RAR5-reader-reject-files-that-declare-invalid-header.patch124
-rw-r--r--meta/recipes-extended/libarchive/libarchive_3.4.0.bb1
-rw-r--r--meta/recipes-extended/libidn/libidn2_2.2.0.bb3
-rw-r--r--meta/recipes-extended/man-db/man-db_2.8.7.bb2
-rw-r--r--meta/recipes-extended/mc/files/0001-Add-option-to-control-configure-args.patch99
-rw-r--r--meta/recipes-extended/mc/files/nomandate.patch21
-rw-r--r--meta/recipes-extended/mc/mc_4.8.23.bb7
-rw-r--r--meta/recipes-extended/pam/libpam/pam.d/common-password5
-rw-r--r--meta/recipes-extended/procps/procps/0001-top-avoid-a-potential-SEGV-during-program-terminatio.patch61
-rw-r--r--meta/recipes-extended/procps/procps/0001-top-restore-one-line-of-code-to-sig_endpgm-function.patch38
-rw-r--r--meta/recipes-extended/procps/procps_3.3.15.bb2
-rw-r--r--meta/recipes-extended/psmisc/psmisc.inc2
-rw-r--r--meta/recipes-extended/screen/screen/CVE-2020-9366.patch48
-rw-r--r--meta/recipes-extended/screen/screen_4.6.2.bb1
-rw-r--r--meta/recipes-extended/sudo/sudo.inc2
-rw-r--r--meta/recipes-extended/sudo/sudo_1.8.27.bb10
-rw-r--r--meta/recipes-extended/tar/tar_1.32.bb2
-rw-r--r--meta/recipes-extended/timezone/timezone.inc10
-rw-r--r--meta/recipes-gnome/gcr/gcr_3.28.1.bb2
-rw-r--r--meta/recipes-gnome/gtk+/gtk+3/sort-resources.patch19
-rw-r--r--meta/recipes-gnome/gtk+/gtk+3_3.24.8.bb1
-rw-r--r--meta/recipes-graphics/jpeg/files/0001-rdppm.c-Fix-buf-overrun-caused-by-bad-binary-PPM.patch81
-rw-r--r--meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.3.bb1
-rw-r--r--meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch2
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18390.patch66
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18391.patch51
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2020-8002.patch39
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer_0.8.0.bb3
-rw-r--r--meta/recipes-graphics/waffle/waffle_1.6.0.bb5
-rw-r--r--meta/recipes-graphics/wayland/libinput/determinism.patch21
-rw-r--r--meta/recipes-graphics/wayland/libinput_1.14.1.bb4
-rw-r--r--meta/recipes-graphics/x11-common/xserver-nodm-init/capability.conf2
-rwxr-xr-xmeta/recipes-graphics/x11-common/xserver-nodm-init/xserver-nodm8
-rw-r--r--meta/recipes-graphics/x11-common/xserver-nodm-init_3.0.bb7
-rw-r--r--meta/recipes-graphics/xorg-app/xorg-app-common.inc2
-rw-r--r--meta/recipes-graphics/xorg-font/encodings_1.0.5.bb4
-rw-r--r--meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb2
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch37
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.5.bb1
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_4.19.bb20
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.2.bb22
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0001-Fix-SUNRPC-Fix-oops-when-trace-sunrpc_task-events-in.patch94
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0002-Fix-sunrpc-null-rpc_clnt-dereference-in-rpc_task_que.patch44
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0003-Fix-sunrpc-use-signed-integer-for-client-id.patch105
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0004-sunrpc-introduce-lttng_get_clid-helper.patch130
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules_2.10.14.bb (renamed from meta/recipes-kernel/lttng/lttng-modules_2.10.11.bb)12
-rw-r--r--meta/recipes-kernel/perf/perf.bb8
-rw-r--r--meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb (renamed from meta/recipes-kernel/wireless-regdb/wireless-regdb_2019.06.03.bb)3
-rw-r--r--meta/recipes-multimedia/gstreamer/gst-validate_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gst-validate_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins.inc2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-python/0001-meson.build-fix-builds-with-python-3.8.patch24
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.1.bb)8
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.1.bb)6
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.1.bb)4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.2.bb (renamed from meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.1.bb)6
-rwxr-xr-xmeta/recipes-sato/webkit/webkitgtk/fix-link-error.patch45
-rw-r--r--meta/recipes-sato/webkit/webkitgtk_2.24.4.bb1
-rw-r--r--meta/recipes-support/aspell/aspell/CVE-2019-20433-0001.patch999
-rw-r--r--meta/recipes-support/aspell/aspell/CVE-2019-20433-0002.patch68
-rw-r--r--meta/recipes-support/aspell/aspell_0.60.7.bb2
-rw-r--r--meta/recipes-support/attr/acl_2.2.52.bb3
-rw-r--r--meta/recipes-support/attr/attr_2.4.47.bb3
-rw-r--r--meta/recipes-support/curl/curl/CVE-2019-15601.patch46
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8169.patch141
-rw-r--r--meta/recipes-support/curl/curl/CVE-2020-8177.patch67
-rw-r--r--meta/recipes-support/curl/curl_7.66.0.bb3
-rw-r--r--meta/recipes-support/gnupg/gnupg_2.2.19.bb (renamed from meta/recipes-support/gnupg/gnupg_2.2.17.bb)4
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2020-13777-a.patch90
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2020-13777-b.patch137
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2020-13777-c.patch68
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch117
-rw-r--r--meta/recipes-support/gnutls/gnutls/posix-shell.patch39
-rw-r--r--meta/recipes-support/gnutls/gnutls_3.6.13.bb (renamed from meta/recipes-support/gnutls/gnutls_3.6.8.bb)9
-rw-r--r--meta/recipes-support/icu/icu/CVE-2020-10531.patch122
-rw-r--r--meta/recipes-support/icu/icu_64.2.bb12
-rw-r--r--meta/recipes-support/iso-codes/iso-codes_4.3.bb2
-rw-r--r--meta/recipes-support/libevdev/libevdev/determinism.patch34
-rw-r--r--meta/recipes-support/libevdev/libevdev_1.8.0.bb3
-rw-r--r--meta/recipes-support/libexif/libexif/CVE-2020-13114.patch73
-rw-r--r--meta/recipes-support/libexif/libexif_0.6.21.bb4
-rw-r--r--meta/recipes-support/libgcrypt/files/determinism.patch32
-rw-r--r--meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb1
-rw-r--r--meta/recipes-support/libpcre/libpcre/CVE-2020-14155.patch41
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2019-20454.patch19
-rw-r--r--meta/recipes-support/libpcre/libpcre2_10.33.bb1
-rw-r--r--meta/recipes-support/libpcre/libpcre_8.43.bb1
-rw-r--r--meta/recipes-support/nss/nss/0001-Bug-1631576-Force-a-fixed-length-for-DSA-exponentiat.patch110
-rw-r--r--meta/recipes-support/nss/nss_3.45.bb1
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2020-11655.patch32
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19244.patch33
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19923.patch50
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19924.patch65
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19925.patch33
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19926.patch31
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-19959.patch46
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2019-20218.patch31
-rw-r--r--meta/recipes-support/sqlite/sqlite3/CVE-2020-13632.patch32
-rw-r--r--meta/recipes-support/sqlite/sqlite3_3.29.0.bb12
-rw-r--r--meta/recipes-support/vim/vim_8.1.1518.bb5
-rw-r--r--scripts/lib/devtool/standard.py6
-rw-r--r--scripts/lib/resulttool/resultutils.py5
-rw-r--r--scripts/lib/wic/engine.py5
-rw-r--r--scripts/lib/wic/filemap.py6
-rw-r--r--scripts/lib/wic/help.py3
-rw-r--r--scripts/lib/wic/pluginbase.py11
-rw-r--r--scripts/lib/wic/plugins/imager/direct.py6
-rwxr-xr-xscripts/oe-build-perf-report20
355 files changed, 14072 insertions, 6599 deletions
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 01b3637469..514897e8b8 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -52,17 +52,20 @@ python do_cve_check () {
"""
if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- patched_cves = get_patches_cves(d)
- patched, unpatched = check_cves(d, patched_cves)
+ try:
+ patched_cves = get_patches_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ whitelisted, patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, cve_data)
+ cve_write_data(d, patched, unpatched, whitelisted, cve_data)
else:
bb.note("No CVE database found, skipping CVE check")
}
-addtask cve_check before do_build
+addtask cve_check before do_build after do_fetch
do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
do_cve_check[nostamp] = "1"
@@ -129,6 +132,10 @@ def get_patches_cves(d):
for url in src_patches(d):
patch_file = bb.fetch.decodeurl(url)[2]
+ if not os.path.isfile(patch_file):
+ bb.error("File Not found: %s" % patch_file)
+ raise FileNotFoundError
+
# Check patch file name for CVE ID
fname_match = cve_file_name_match.search(patch_file)
if fname_match:
@@ -172,13 +179,13 @@ def check_cves(d, patched_cves):
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [])
+ return ([], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
# If the recipe has been whitlisted we return empty lists
if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
- return ([], [])
+ return ([], [], [])
old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
if old_cve_whitelist:
@@ -214,7 +221,7 @@ def check_cves(d, patched_cves):
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
- if (operator_start == '=' and pv == version_start):
+ if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
if operator_start:
@@ -256,7 +263,7 @@ def check_cves(d, patched_cves):
conn.close()
- return (list(patched_cves), cves_unpatched)
+ return (list(cve_whitelist), list(patched_cves), cves_unpatched)
def get_cve_info(d, cves):
"""
@@ -280,7 +287,7 @@ def get_cve_info(d, cves):
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, cve_data):
+def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
"""
Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
CVE manifest if enabled.
@@ -294,9 +301,11 @@ def cve_write_data(d, patched, unpatched, cve_data):
for cve in sorted(cve_data):
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
+ write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in patched:
+ if cve in whitelisted:
+ write_string += "CVE STATUS: Whitelisted\n"
+ elif cve in patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
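
The cve-check changes above make check_cves() return three lists (whitelisted, patched, unpatched) instead of two, and cve_write_data() now records a distinct "Whitelisted" status. A minimal sketch, not the bbclass code itself and using made-up CVE IDs, of how the three lists map onto the per-CVE status written to the report:

    def cve_status(whitelisted, patched, unpatched):
        # Whitelist wins over patched, patched wins over unpatched,
        # mirroring the ordering of the checks in cve_write_data()
        status = {}
        for cve in unpatched:
            status[cve] = "Unpatched"
        for cve in patched:
            status[cve] = "Patched"
        for cve in whitelisted:
            status[cve] = "Whitelisted"
        return status

    print(cve_status(["CVE-2020-0001"], ["CVE-2020-0002"], ["CVE-2020-0003"]))
    # {'CVE-2020-0003': 'Unpatched', 'CVE-2020-0002': 'Patched', 'CVE-2020-0001': 'Whitelisted'}
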
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 1bcb09c598..6cd1b76fde 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -53,6 +53,9 @@ UBOOT_MKIMAGE_DTCOPTS ??= ""
# fitImage Hash Algo
FIT_HASH_ALG ?= "sha256"
+# fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+
#
# Emit the fitImage ITS header
#
@@ -246,6 +249,7 @@ EOF
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
+ conf_sign_algo="${FIT_SIGN_ALG}"
if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -327,7 +331,7 @@ EOF
cat << EOF >> ${1}
signature@1 {
- algo = "${conf_csum},rsa2048";
+ algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
${sign_line}
};
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index ed9bcfa57c..ab05ac91f4 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,5 +1,5 @@
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
+SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
@@ -301,6 +301,7 @@ do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
+do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
set +e
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 750988f4e5..9ace74564c 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -452,7 +452,7 @@ do_shared_workdir () {
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- cp Module.symvers $kerneldir/
+ [ -e Module.symvers ] && cp Module.symvers $kerneldir/
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
index 675d40ec9a..a951ba3325 100644
--- a/meta/classes/kernelsrc.bbclass
+++ b/meta/classes/kernelsrc.bbclass
@@ -1,7 +1,7 @@
S = "${STAGING_KERNEL_DIR}"
deltask do_fetch
deltask do_unpack
-do_patch[depends] += "virtual/kernel:do_patch"
+do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index adca881c85..b0d37b119c 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -252,7 +252,7 @@ def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if availabel and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
+    'X' if available and the trailing '+' (so GPLv3+ becomes GPL-3.0+),
or the passed license if there is no canonical form.
"""
lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
@@ -262,10 +262,29 @@ def canonical_license(d, license):
lic += '+'
return lic or license
+def available_licenses(d):
+ """
+ Return the available licenses by searching the directories specified by
+ COMMON_LICENSE_DIR and LICENSE_PATH.
+ """
+ lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
+ (d.getVar('LICENSE_PATH') or '')).split()
+
+ licenses = []
+ for lic_dir in lic_dirs:
+ licenses += os.listdir(lic_dir)
+
+ licenses = sorted(licenses)
+ return licenses
+
+# Only determine the list of all available licenses once. This assumes that any
+# additions to LICENSE_PATH have been done before this file is parsed.
+AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
+
def expand_wildcard_licenses(d, wildcard_licenses):
"""
- Return actual spdx format license names if wildcard used. We expand
- wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ Return actual spdx format license names if wildcards are used. We expand
+ wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
"""
import fnmatch
licenses = wildcard_licenses[:]
@@ -274,7 +293,7 @@ def expand_wildcard_licenses(d, wildcard_licenses):
spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
- spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
+ spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
for wld_lic in wildcard_licenses:
licenses += fnmatch.filter(spdx_lics, wld_lic)
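
expand_wildcard_licenses() now matches wildcards against AVAILABLE_LICENSES, i.e. whatever license texts are present under COMMON_LICENSE_DIR and LICENSE_PATH, instead of the hard-coded SRC_DISTRIBUTE_LICENSES list. A small sketch of the fnmatch-based expansion, with a shortened license list for illustration:

    import fnmatch

    available = ["GPL-2.0", "GPL-3.0", "LGPL-3.0", "MIT"]   # stand-in for AVAILABLE_LICENSES
    wildcards = ["*GPL-3.0", "MIT"]

    expanded = []
    for wld in wildcards:
        expanded += fnmatch.filter(available, wld)
    print(expanded)   # ['GPL-3.0', 'LGPL-3.0', 'MIT']
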
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
index c0fbf26836..e2876cec7a 100644
--- a/meta/classes/linuxloader.bbclass
+++ b/meta/classes/linuxloader.bbclass
@@ -41,7 +41,7 @@ def get_glibc_loader(d):
elif re.search("i.86", targetarch):
dynamic_loader = "${base_libdir}/ld-linux.so.2"
elif targetarch == "arm":
- dynamic_loader = "${base_libdir}/ld-linux.so.3"
+ dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
elif targetarch.startswith("aarch64"):
dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
elif targetarch.startswith("riscv64"):
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index cd241f1c84..25ec089ae1 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -5,6 +5,13 @@ QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
+# There is a bug in patch 2.7.3 and earlier where index lines
+# in patches can change file modes when they shouldn't:
+# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
+# This leaks into debug sources in particular. Add the dependency
+# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
+PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
+
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index e5d7ab3ce1..87b4c85fc0 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -22,5 +22,5 @@ SECTION = "devel/python"
SRC_URI += "${PYPI_SRC_URI}"
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
-UPSTREAM_CHECK_URI ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
-UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)"
+UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
+UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
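
The pypi.bbclass change points upstream version checking at the pypi.org project pages and anchors the version regex with a trailing slash to match that page layout. A quick check of the new pattern against a hypothetical package URL ("foo" stands in for ${PYPI_PACKAGE}):

    import re

    regex = r"/foo/(?P<pver>(\d+[\.\-_]*)+)/"
    m = re.search(regex, "https://pypi.org/project/foo/1.2.3/")
    print(m.group("pver"))   # 1.2.3
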
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
index 582812c1cf..af04be5cca 100644
--- a/meta/classes/relocatable.bbclass
+++ b/meta/classes/relocatable.bbclass
@@ -6,13 +6,15 @@ python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
-relocatable_native_pcfiles () {
- if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
- fi
- if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
- fi
+relocatable_native_pcfiles() {
+ for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
+ files_template=${SYSROOT_DESTDIR}$dir/*.pc
+ # Expand to any files matching $files_template
+ files=$(echo $files_template)
+ # $files_template and $files will differ if any files were found
+ if [ "$files_template" != "$files" ]; then
+ rel=$(realpath -m --relative-to=$dir ${base_prefix})
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
+ fi
+ done
}
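
The rewritten relocatable_native_pcfiles() loops over both pkgconfig directories and only runs sed when the glob actually expanded (an unexpanded pattern compares equal to itself). The same "did the glob match anything?" guard, expressed with Python's glob module for clarity (paths and prefix are illustrative):

    import glob, os

    sysroot_destdir = "/work/recipe/sysroot-destdir"   # stand-in for ${SYSROOT_DESTDIR}
    base_prefix = "/"                                  # stand-in for ${base_prefix}

    for d in ("/usr/lib/pkgconfig", "/usr/share/pkgconfig"):
        files = glob.glob(sysroot_destdir + d + "/*.pc")
        if files:   # equivalent to the "$files_template" != "$files" test above
            rel = os.path.relpath(base_prefix, d)
            print("would rewrite %s using ${pcfiledir}/%s" % (files, rel))
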
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
index 39b6e40cac..750eb950f2 100644
--- a/meta/classes/reproducible_build.bbclass
+++ b/meta/classes/reproducible_build.bbclass
@@ -44,10 +44,12 @@ SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
SSTATETASKS += "do_deploy_source_date_epoch"
do_deploy_source_date_epoch () {
- echo "Deploying SDE to ${SDE_DIR}."
mkdir -p ${SDE_DEPLOYDIR}
if [ -e ${SDE_FILE} ]; then
+ echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ else
+ echo "${SDE_FILE} not found!"
fi
}
@@ -56,7 +58,11 @@ python do_deploy_source_date_epoch_setscene () {
bb.utils.mkdirhier(d.getVar('SDE_DIR'))
sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
if os.path.exists(sde_file):
- os.rename(sde_file, d.getVar('SDE_FILE'))
+ target = d.getVar('SDE_FILE')
+ bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
+ os.rename(sde_file, target)
+ else:
+ bb.debug(1, "%s not found!" % sde_file)
}
do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
@@ -164,16 +170,32 @@ python do_create_source_date_epoch_stamp() {
f.write(str(source_date_epoch))
}
+def get_source_date_epoch_value(d):
+ cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
+ if cached:
+ return cached
+
+ epochfile = d.getVar('SDE_FILE')
+ source_date_epoch = 0
+ if os.path.isfile(epochfile):
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
+ source_date_epoch = 0
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ else:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
+ return str(source_date_epoch)
+
+export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
python () {
if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
d.appendVarFlag("do_unpack", "postfuncs", " do_create_source_date_epoch_stamp")
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = "0"
- if os.path.isfile(epochfile):
- with open(epochfile, 'r') as f:
- source_date_epoch = f.read()
- bb.debug(1, "SOURCE_DATE_EPOCH: %s" % source_date_epoch)
- d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
}
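
get_source_date_epoch_value() above replaces the anonymous-python handling of SDE_FILE: the value is read once, validated as an integer, cached in the datastore, and falls back to 0 when the file is missing or malformed. A stripped-down sketch of that parse-and-fallback behaviour (the path is illustrative):

    def read_source_date_epoch(path):
        try:
            with open(path) as f:
                return int(f.read())
        except FileNotFoundError:
            return 0    # no recorded epoch: default to 0
        except ValueError:
            return 0    # invalid contents: revert to 0, as the warning above describes

    print(read_source_date_epoch("/tmp/source_date_epoch.txt"))
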
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 936fe913b4..5c2f8f9d75 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -625,13 +625,14 @@ def check_sanity_version_change(status, d):
# In other words, these tests run once in a given build directory and then
# never again until the sanity version or host distrubution id/version changes.
- # Check the python install is complete. glib-2.0-natives requries
- # xml.parsers.expat
+ # Check the python install is complete. Examples that are often removed in
+ # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
+ # requires distutils.sysconfig.
try:
import xml.parsers.expat
- except ImportError:
- status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
- import stat
+ import distutils.sysconfig
+ except ImportError as e:
+ status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
status.addresult(check_make_version(d))
status.addresult(check_patch_version(d))
@@ -667,6 +668,7 @@ def check_sanity_version_change(status, d):
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ import stat
tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
diff --git a/meta/conf/distro/include/maintainers.inc b/meta/conf/distro/include/maintainers.inc
index ab0c6c5541..7494873190 100644
--- a/meta/conf/distro/include/maintainers.inc
+++ b/meta/conf/distro/include/maintainers.inc
@@ -82,6 +82,7 @@ RECIPE_MAINTAINER_pn-build-appliance-image = "Richard Purdie <richard.purdie@lin
RECIPE_MAINTAINER_pn-build-compare = "Paul Eggleton <paul.eggleton@linux.intel.com>"
RECIPE_MAINTAINER_pn-build-sysroots = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-builder = "Richard Purdie <richard.purdie@linuxfoundation.org>"
+RECIPE_MAINTAINER_pn-buildtools-extended-tarball = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-buildtools-tarball = "Richard Purdie <richard.purdie@linuxfoundation.org>"
RECIPE_MAINTAINER_pn-busybox = "Andrej Valek <andrej.valek@siemens.com>"
RECIPE_MAINTAINER_pn-busybox-inittab = "Denys Dmytriyenko <denys@ti.com>"
diff --git a/meta/conf/distro/include/security_flags.inc b/meta/conf/distro/include/security_flags.inc
index aaf04e9e59..568d03693c 100644
--- a/meta/conf/distro/include/security_flags.inc
+++ b/meta/conf/distro/include/security_flags.inc
@@ -57,6 +57,8 @@ SECURITY_STRINGFORMAT_pn-gcc = ""
TARGET_CC_ARCH_append_class-target = " ${SECURITY_CFLAGS}"
TARGET_LDFLAGS_append_class-target = " ${SECURITY_LDFLAGS}"
+TARGET_CC_ARCH_append_class-cross-canadian = " ${SECURITY_CFLAGS}"
+TARGET_LDFLAGS_append_class-cross-canadian = " ${SECURITY_LDFLAGS}"
SECURITY_STACK_PROTECTOR_pn-gcc-runtime = ""
SECURITY_STACK_PROTECTOR_pn-glibc = ""
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index ad75d3e2a3..69b6edee5f 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,9 +6,9 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.30"
+UNINATIVE_MAXGLIBCVERSION = "2.32"
-UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.7/"
-UNINATIVE_CHECKSUM[aarch64] ?= "e76a45886ee8a0b3904b761c17ac8ff91edf9811ee455f1832d10763ba794dfc"
-UNINATIVE_CHECKSUM[i686] ?= "810d027dfb1c7675226afbcec07808770516c969ee7378f6d8240281083f8924"
-UNINATIVE_CHECKSUM[x86_64] ?= "9498d8bba047499999a7310ac2576d0796461184965351a56f6d32c888a1f216"
+UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.9/"
+UNINATIVE_CHECKSUM[aarch64] ?= "9f25a667aee225b1dd65c4aea73e01983e825b1cb9b56937932a1ee328b45f81"
+UNINATIVE_CHECKSUM[i686] ?= "cae5d73245d95b07cf133b780ba3f6c8d0adca3ffc4e7e7fab999961d5e24d36"
+UNINATIVE_CHECKSUM[x86_64] ?= "d07916b95c419c81541a19c8ef0ed8cbd78ae18437ff28a4c8a60ef40518e423"
diff --git a/meta/conf/documentation.conf b/meta/conf/documentation.conf
index 550df20b0f..ce2a37e0e5 100644
--- a/meta/conf/documentation.conf
+++ b/meta/conf/documentation.conf
@@ -69,6 +69,7 @@ ASSUME_SHLIBS[doc] = "List of shlib:package[_version] mappings. Useful for lib p
AUTHOR[doc] = "Email address used to contact the original author(s) in order to send patches and forward bugs."
AUTO_SYSLINUXMENU[doc] = "Enables creating an automatic menu for the syslinux bootloader."
AUTOREV[doc] = "When SRCREV is set to the value of this variable, it specifies to use the latest source revision in the repository."
+AVAILABLE_LICENSES[doc] = "List of licenses found in the directories specified by COMMON_LICENSE_DIR and LICENSE_PATH."
#B
diff --git a/meta/conf/multilib.conf b/meta/conf/multilib.conf
index cfed3fbbd0..58f2ac5c86 100644
--- a/meta/conf/multilib.conf
+++ b/meta/conf/multilib.conf
@@ -9,6 +9,7 @@ MULTILIBS ??= "multilib:lib32"
STAGING_DIR_HOST = "${WORKDIR}/${MLPREFIX}recipe-sysroot"
STAGING_DIR_TARGET = "${WORKDIR}/${MLPREFIX}recipe-sysroot"
RECIPE_SYSROOT = "${WORKDIR}/${MLPREFIX}recipe-sysroot"
+RECIPE_SYSROOT_class-native = "${WORKDIR}/recipe-sysroot"
INHERIT += "multilib_global"
diff --git a/meta/files/toolchain-shar-extract.sh b/meta/files/toolchain-shar-extract.sh
index ccc4f4e1ac..2e0fe94963 100644
--- a/meta/files/toolchain-shar-extract.sh
+++ b/meta/files/toolchain-shar-extract.sh
@@ -1,13 +1,8 @@
#!/bin/sh
-[ -z "$ENVCLEANED" ] && exec /usr/bin/env -i ENVCLEANED=1 HOME="$HOME" \
- LC_ALL=en_US.UTF-8 \
- TERM=$TERM \
- ICECC_PATH="$ICECC_PATH" \
- http_proxy="$http_proxy" https_proxy="$https_proxy" ftp_proxy="$ftp_proxy" \
- no_proxy="$no_proxy" all_proxy="$all_proxy" GIT_PROXY_COMMAND="$GIT_PROXY_COMMAND" "$0" "$@"
-[ -f /etc/environment ] && . /etc/environment
-export PATH=`echo "$PATH" | sed -e 's/:\.//' -e 's/::/:/'`
+export LC_ALL=en_US.UTF-8
+# Remove invalid PATH elements first (maybe from a previously setup toolchain now deleted)
+PATH=`python3 -c 'import os; print(":".join(e for e in os.environ["PATH"].split(":") if os.path.exists(e)))'`
tweakpath () {
case ":${PATH}:" in
@@ -249,7 +244,7 @@ if [ @SDK_ARCHIVE_TYPE@ = "zip" ]; then
rm sdk.zip && exit 1
fi
else
- tail -n +$payload_offset $0| $SUDO_EXEC tar xJ -C $target_sdk_dir --checkpoint=.2500 $EXTRA_TAR_OPTIONS || exit 1
+ tail -n +$payload_offset $0| $SUDO_EXEC tar mxJ -C $target_sdk_dir --checkpoint=.2500 $EXTRA_TAR_OPTIONS || exit 1
fi
echo "done"
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index 7c373715ad..e0b15dc9b4 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -40,8 +40,9 @@ def opkg_query(cmd_output):
ver = ""
filename = ""
dep = []
+ prov = []
pkgarch = ""
- for line in cmd_output.splitlines():
+ for line in cmd_output.splitlines()+['']:
line = line.rstrip()
if ':' in line:
if line.startswith("Package: "):
@@ -64,6 +65,10 @@ def opkg_query(cmd_output):
dep.append("%s [REC]" % recommend)
elif line.startswith("PackageArch: "):
pkgarch = line.split(": ")[1]
+ elif line.startswith("Provides: "):
+ provides = verregex.sub('', line.split(": ")[1])
+ for provide in provides.split(", "):
+ prov.append(provide)
# When there is a blank line save the package information
elif not line:
@@ -72,20 +77,15 @@ def opkg_query(cmd_output):
filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
if pkg:
output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep, "pkgarch":pkgarch }
+ "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
pkg = ""
arch = ""
ver = ""
filename = ""
dep = []
+ prov = []
pkgarch = ""
- if pkg:
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep }
-
return output
def failed_postinsts_abort(pkgs, log_path):
@@ -360,7 +360,7 @@ class DpkgPkgsList(PkgsList):
"--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
"-W"]
- cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n")
try:
cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
@@ -578,6 +578,11 @@ class PackageManager(object, metaclass=ABCMeta):
# oe-pkgdata-util reads it from a file
with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
pkgs = self.list_installed()
+
+ provided_pkgs = set()
+ for pkg in pkgs.values():
+ provided_pkgs |= set(pkg.get('provs', []))
+
output = oe.utils.format_pkg_list(pkgs, "arch")
installed_pkgs.write(output)
installed_pkgs.flush()
@@ -589,10 +594,15 @@ class PackageManager(object, metaclass=ABCMeta):
if exclude:
cmd.extend(['--exclude=' + '|'.join(exclude.split())])
try:
- bb.note("Installing complementary packages ...")
bb.note('Running %s' % cmd)
complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- self.install(complementary_pkgs.split(), attempt_only=True)
+ complementary_pkgs = set(complementary_pkgs.split())
+ skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
+ install_pkgs = sorted(complementary_pkgs - provided_pkgs)
+ bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
+ ' '.join(install_pkgs),
+ ' '.join(skip_pkgs)))
+ self.install(install_pkgs, attempt_only=True)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
@@ -1619,7 +1629,7 @@ class DpkgPM(OpkgDpkgPM):
os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
+ cmd = "%s %s install --force-yes --allow-unauthenticated --no-remove %s" % \
(self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
try:
@@ -1781,8 +1791,7 @@ class DpkgPM(OpkgDpkgPM):
open(os.path.join(target_dpkg_dir, "available"), "w+").close()
def remove_packaging_data(self):
- bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir')), True)
+ bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
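
opkg_query() now records each package's Provides entries, and install_complementary() subtracts those from the list returned by oe-pkgdata-util so that already-provided packages are not handed back to the package manager. A small sketch of that set arithmetic with made-up package names:

    complementary_pkgs = {"locale-base-en-us", "packagegroup-core-ssh-dbg", "libfoo-dbg"}
    provided_pkgs = {"libfoo-dbg"}            # union of every installed package's "provs" list

    install_pkgs = sorted(complementary_pkgs - provided_pkgs)
    skip_pkgs = sorted(complementary_pkgs & provided_pkgs)

    print(install_pkgs)   # ['locale-base-en-us', 'packagegroup-core-ssh-dbg']
    print(skip_pkgs)      # ['libfoo-dbg']
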
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index b1132ccb11..3a5ef8d921 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -3,6 +3,10 @@
#
def prserv_make_conn(d, check = False):
+ # Otherwise this fails when called from recipes which e.g. inherit python3native (which sets _PYTHON_SYSCONFIGDATA_NAME) with:
+ # No module named '_sysconfigdata'
+ if '_PYTHON_SYSCONFIGDATA_NAME' in os.environ:
+ del os.environ['_PYTHON_SYSCONFIGDATA_NAME']
import prserv.serv
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index c566ce5a0c..f1abff0c45 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -103,6 +103,7 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
+ self.buildarch = data.getVar('BUILD_ARCH')
pass
def tasks_resolved(self, virtmap, virtpnmap, dataCache):
@@ -140,8 +141,23 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
+ def prep_taskhash(self, tid, deps, dataCache):
+ super().prep_taskhash(tid, deps, dataCache)
+ if hasattr(self, "extramethod"):
+ (_, _, _, fn) = bb.runqueue.split_tid_mcfn(tid)
+ inherits = " ".join(dataCache.inherits[fn])
+ if inherits.find("/native.bbclass") != -1 or inherits.find("/cross.bbclass") != -1:
+ self.extramethod[tid] = ":" + self.buildarch
+
def get_taskhash(self, tid, deps, dataCache):
h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(tid, deps, dataCache)
+ if tid in self.lockedhashes:
+ if self.lockedhashes[tid]:
+ return self.lockedhashes[tid]
+ else:
+ return h
+
+ h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(tid, deps, dataCache)
(mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
@@ -178,17 +194,19 @@ class SignatureGeneratorOEBasicHashMixIn(object):
% (recipename, task, h, h_locked, var))
return h_locked
+
+ self.lockedhashes[tid] = False
#bb.warn("%s %s %s" % (recipename, task, h))
return h
def get_unihash(self, tid):
- if tid in self.lockedhashes:
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return self.lockedhashes[tid]
return super().get_unihash(tid)
def dump_sigtask(self, fn, task, stampbase, runtime):
tid = fn + ":" + task
- if tid in self.lockedhashes:
+ if tid in self.lockedhashes and self.lockedhashes[tid]:
return
super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
@@ -512,8 +530,12 @@ def OEOuthashBasic(path, sigfile, task, d):
add_perm(stat.S_IXOTH, 'x')
if include_owners:
- update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
- update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+ try:
+ update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
+ update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
+ except KeyError:
+ bb.warn("KeyError in %s" % path)
+ raise
update_hash(" ")
if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
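
In the sstatesig changes, self.lockedhashes now caches a result for every task it has examined: the locked signature when one applies, or False when the task was checked and found unlocked. The truthiness test is what distinguishes the two cases, as in this simplified lookup (task IDs and hashes are made up):

    lockedhashes = {
        "/path/to/recipe.bb:do_install": "0123abcd",   # locked to a fixed signature
        "/path/to/other.bb:do_compile": False,          # checked, not locked
    }

    def effective_hash(tid, computed):
        if tid in lockedhashes and lockedhashes[tid]:
            return lockedhashes[tid]    # locked hash wins
        return computed                 # otherwise use the freshly computed hash

    print(effective_hash("/path/to/other.bb:do_compile", "deadbeef"))   # deadbeef
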
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 652b2be145..144c123a0e 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -387,7 +387,7 @@ def host_gcc_version(d, taskcontextonly=False):
except subprocess.CalledProcessError as e:
bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
- match = re.match(r".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
if not match:
bb.fatal("Can't get compiler version from %s --version output" % compiler)
diff --git a/meta/lib/oeqa/core/utils/concurrencytest.py b/meta/lib/oeqa/core/utils/concurrencytest.py
index 0f7b3dcc11..e6b14da89d 100644
--- a/meta/lib/oeqa/core/utils/concurrencytest.py
+++ b/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -261,7 +261,7 @@ def fork_for_tests(concurrency_num, suite):
oe.path.copytree(selftestdir, newselftestdir)
for e in os.environ:
- if builddir in os.environ[e]:
+ if builddir + "/" in os.environ[e] or os.environ[e].endswith(builddir):
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
diff --git a/meta/lib/oeqa/sdkext/testsdk.py b/meta/lib/oeqa/sdkext/testsdk.py
index 785b5dda53..c5c46df6cd 100644
--- a/meta/lib/oeqa/sdkext/testsdk.py
+++ b/meta/lib/oeqa/sdkext/testsdk.py
@@ -25,11 +25,8 @@ class TestSDKExt(TestSDKBase):
subprocesstweak.errors_have_output()
- # extensible sdk can be contaminated if native programs are
- # in PATH, i.e. use perl-native instead of eSDK one.
- paths_to_avoid = [d.getVar('STAGING_DIR'),
- d.getVar('BASE_WORKDIR')]
- os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+ # We need the original PATH for testing the eSDK, not with our manipulations
+ os.environ['PATH'] = d.getVar("BB_ORIGENV", False).getVar("PATH")
tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
if not os.path.exists(tcname):
diff --git a/meta/lib/oeqa/selftest/cases/incompatible_lic.py b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
index 8fb93af8a8..3cc5bbc35c 100644
--- a/meta/lib/oeqa/selftest/cases/incompatible_lic.py
+++ b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
@@ -12,12 +12,12 @@ class IncompatibleLicenseTests(OESelftestTestCase):
if error_msg not in result.output:
raise AssertionError(result.output)
- # Verify that a package with an SPDX license (from SRC_DISTRIBUTE_LICENSES)
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
# cannot be built when INCOMPATIBLE_LICENSE contains this SPDX license
def test_incompatible_spdx_license(self):
self.lic_test('incompatible-license', 'GPL-3.0', 'GPL-3.0')
- # Verify that a package with an SPDX license (from SRC_DISTRIBUTE_LICENSES)
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
# cannot be built when INCOMPATIBLE_LICENSE contains an alias (in
# SPDXLICENSEMAP) of this SPDX license
def test_incompatible_alias_spdx_license(self):
@@ -35,7 +35,7 @@ class IncompatibleLicenseTests(OESelftestTestCase):
self.lic_test('incompatible-license-alias', 'GPLv3', 'GPLv3')
# Verify that a package with a non-SPDX license (neither in
- # SRC_DISTRIBUTE_LICENSES nor in SPDXLICENSEMAP) cannot be built when
+ # AVAILABLE_LICENSES nor in SPDXLICENSEMAP) cannot be built when
# INCOMPATIBLE_LICENSE contains this license
def test_incompatible_nonspdx_license(self):
self.lic_test('incompatible-nonspdx-license', 'FooLicense', 'FooLicense')
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index a9110565a9..1b0b5bae70 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -174,6 +174,8 @@ class ReproducibleTests(OESelftestTestCase):
# NOTE: The temp directories from the reproducible build are purposely
# kept after the build so it can be diffed for debugging.
+ fails = []
+
for c in self.package_classes:
with self.subTest(package_class=c):
package_class = 'package_' + c
@@ -197,6 +199,9 @@ class ReproducibleTests(OESelftestTestCase):
self.copy_file(d.test, '/'.join([save_dir, d.test]))
if result.missing or result.different:
- self.fail("The following %s packages are missing or different: %s" %
- (c, ' '.join(r.test for r in (result.missing + result.different))))
+ fails.append("The following %s packages are missing or different: %s" %
+ (c, '\n'.join(r.test for r in (result.missing + result.different))))
+
+ if fails:
+ self.fail('\n'.join(fails))
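
The reproducible test now collects per-class failures and reports them together instead of stopping at the first differing package class; roughly this pattern (package classes and file names are illustrative):

    import unittest

    class CollectThenFail(unittest.TestCase):
        def test_all_package_classes(self):
            results = {"deb": [], "ipk": ["busybox_1.31.0-r0.ipk"], "rpm": []}
            fails = []
            for pkg_class, different in results.items():
                with self.subTest(package_class=pkg_class):
                    if different:
                        # Record the problem but keep checking the other classes.
                        fails.append("%s packages differ: %s"
                                     % (pkg_class, " ".join(different)))
            if fails:
                self.fail("\n".join(fails))

    if __name__ == "__main__":
        unittest.main()
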
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index 7d3922ce44..d4fea91350 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -166,7 +166,7 @@ class TestImage(OESelftestTestCase):
bitbake('core-image-full-cmdline socat')
bitbake('-c testimage core-image-full-cmdline')
- def test_testimage_virgl_gtk(self):
+ def disabled_test_testimage_virgl_gtk(self):
"""
Summary: Check host-assisted accelerate OpenGL functionality in qemu with gtk frontend
Expected: 1. Check that virgl kernel driver is loaded and 3d acceleration is enabled
diff --git a/meta/lib/oeqa/selftest/cases/signing.py b/meta/lib/oeqa/selftest/cases/signing.py
index 5c4e01b2c3..5b8f9bbd38 100644
--- a/meta/lib/oeqa/selftest/cases/signing.py
+++ b/meta/lib/oeqa/selftest/cases/signing.py
@@ -44,7 +44,9 @@ class Signing(OESelftestTestCase):
origenv = os.environ.copy()
for e in os.environ:
- if builddir in os.environ[e]:
+ if builddir + "/" in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir + "/", newbuilddir + "/")
+ if os.environ[e].endswith(builddir):
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
os.chdir(newbuilddir)
diff --git a/meta/lib/oeqa/selftest/cases/sstatetests.py b/meta/lib/oeqa/selftest/cases/sstatetests.py
index 6757a0ec68..9adb511960 100644
--- a/meta/lib/oeqa/selftest/cases/sstatetests.py
+++ b/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -446,6 +446,46 @@ BB_SIGNATURE_HANDLER = "OEBasicHash"
self.assertCountEqual(files1, files2)
+ def test_sstate_multilib_or_not_native_samesigs(self):
+ """The sstate checksums of two native recipes (and their dependencies)
+ where the target is using multilib in one but not the other
+ should be the same. We use the qemux86copy machine to test
+ this.
+ """
+
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+TCLIBCAPPEND = \"\"
+MACHINE = \"qemux86\"
+require conf/multilib.conf
+MULTILIBS = "multilib:lib32"
+DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("binutils-native -S none")
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+TCLIBCAPPEND = \"\"
+MACHINE = \"qemux86copy\"
+BB_SIGNATURE_HANDLER = "OEBasicHash"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("binutils-native -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ f.append(os.path.join(root, name))
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
+ self.assertCountEqual(files1, files2)
+
+
def test_sstate_noop_samesigs(self):
"""
The sstate checksums of two builds with these variables changed or
diff --git a/meta/lib/oeqa/selftest/context.py b/meta/lib/oeqa/selftest/context.py
index c4eb5d614e..3d3b19c6e8 100644
--- a/meta/lib/oeqa/selftest/context.py
+++ b/meta/lib/oeqa/selftest/context.py
@@ -280,11 +280,15 @@ class OESelftestTestContextExecutor(OETestContextExecutor):
return rc
def _signal_clean_handler(self, signum, frame):
- sys.exit(1)
+ if self.ourpid == os.getpid():
+ sys.exit(1)
def run(self, logger, args):
self._process_args(logger, args)
+ # Set up a SIGTERM handler to allow restoration of files like local.conf and bblayers.conf
+ # but don't interfere with other processes
+ self.ourpid = os.getpid()
signal.signal(signal.SIGTERM, self._signal_clean_handler)
rc = None
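
The guarded handler above keeps forked child processes (which inherit the parent's signal disposition) from running the parent's cleanup; a standalone sketch of the idea, with the cleanup action invented:

    import os
    import signal
    import sys

    class Runner:
        def install_sigterm_handler(self):
            # Remember who installed the handler; forked children inherit it
            # but must not trigger the parent's exit/cleanup path.
            self.ourpid = os.getpid()
            signal.signal(signal.SIGTERM, self._signal_clean_handler)

        def _signal_clean_handler(self, signum, frame):
            if self.ourpid == os.getpid():
                sys.exit(1)  # unwinds through finally blocks that restore config files
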
diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py
index 1445e3ecfb..41557dc224 100644
--- a/meta/lib/oeqa/targetcontrol.py
+++ b/meta/lib/oeqa/targetcontrol.py
@@ -117,9 +117,9 @@ class QemuTarget(BaseTarget):
import oe.path
bb.utils.mkdirhier(self.testdir)
self.qemurunnerlog = os.path.join(self.testdir, 'qemurunner_log.%s' % self.datetime)
- loggerhandler = logging.FileHandler(self.qemurunnerlog)
- loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
- self.logger.addHandler(loggerhandler)
+ self.loggerhandler = logging.FileHandler(self.qemurunnerlog)
+ self.loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ self.logger.addHandler(self.loggerhandler)
oe.path.symlink(os.path.basename(self.qemurunnerlog), os.path.join(self.testdir, 'qemurunner_log'), force=True)
if d.getVar("DISTRO") == "poky-tiny":
@@ -182,6 +182,7 @@ class QemuTarget(BaseTarget):
def stop(self):
self.runner.stop()
+ self.logger.removeHandler(self.loggerhandler)
self.connection = None
self.ip = None
self.server_ip = None
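
Keeping a reference to the FileHandler lets stop() detach it again, so repeated QemuTarget start/stop cycles no longer pile duplicate handlers onto the shared logger; a hedged sketch of that logging pattern (logger and file names are illustrative):

    import logging

    class Target:
        def __init__(self, logfile):
            self.logger = logging.getLogger("qemu-target")
            self.loggerhandler = logging.FileHandler(logfile)
            self.loggerhandler.setFormatter(
                logging.Formatter("%(levelname)s: %(message)s"))
            self.logger.addHandler(self.loggerhandler)

        def stop(self):
            # Without this, every start()/stop() cycle would leave one more
            # handler attached to the module-level logger.
            self.logger.removeHandler(self.loggerhandler)
            self.loggerhandler.close()
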
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index fe8b77d97a..3db177b001 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -35,6 +35,7 @@ class QemuRunner:
# Popen object for runqemu
self.runqemu = None
+ self.runqemu_exited = False
# pid of the qemu process that runqemu will start
self.qemupid = None
# target ip - from the command line or runqemu output
@@ -102,7 +103,6 @@ class QemuRunner:
self.logger.debug("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
self.stop()
self._dump_host()
- raise SystemExit
def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True):
env = os.environ.copy()
@@ -206,6 +206,8 @@ class QemuRunner:
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
+ if self.runqemu_exited:
+ return False
if self.runqemu.returncode:
# No point waiting any longer
self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode)
@@ -215,6 +217,9 @@ class QemuRunner:
return False
time.sleep(0.5)
+ if self.runqemu_exited:
+ return False
+
if not self.is_alive():
self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
(self.runqemutime, time.strftime("%D %H:%M:%S")))
@@ -385,7 +390,7 @@ class QemuRunner:
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
self.runqemu.stdin.close()
self.runqemu.stdout.close()
- self.runqemu = None
+ self.runqemu_exited = True
if hasattr(self, 'server_socket') and self.server_socket:
self.server_socket.close()
@@ -396,7 +401,10 @@ class QemuRunner:
self.qemupid = None
self.ip = None
if os.path.exists(self.qemu_pidfile):
- os.remove(self.qemu_pidfile)
+ try:
+ os.remove(self.qemu_pidfile)
+ except FileNotFoundError as e:
+ self.logger.warning('qemu pidfile is no longer present')
if self.monitorpipe:
self.monitorpipe.close()
@@ -422,7 +430,7 @@ class QemuRunner:
return False
def is_alive(self):
- if not self.runqemu or self.runqemu.poll() is not None:
+ if not self.runqemu or self.runqemu.poll() is not None or self.runqemu_exited:
return False
if os.path.isfile(self.qemu_pidfile):
# when handling pidfile, qemu creates the file, stat it, lock it and then write to it
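
Rather than dropping the Popen object in stop(), qemurunner now records that runqemu has exited, so start() and is_alive() can bail out cleanly; a rough standalone sketch of the flag pattern (process handling heavily simplified):

    import subprocess

    class Runner:
        def __init__(self):
            self.proc = None
            self.proc_exited = False

        def start(self, cmd):
            self.proc = subprocess.Popen(cmd)

        def stop(self):
            if self.proc and self.proc.poll() is None:
                self.proc.terminate()
                self.proc.wait()
            # Keep self.proc around for returncode/log access, just mark it dead.
            self.proc_exited = True

        def is_alive(self):
            return bool(self.proc) and self.proc.poll() is None and not self.proc_exited
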
diff --git a/meta/recipes-bsp/u-boot/u-boot-tools.inc b/meta/recipes-bsp/u-boot/u-boot-tools.inc
new file mode 100644
index 0000000000..35894e1a8f
--- /dev/null
+++ b/meta/recipes-bsp/u-boot/u-boot-tools.inc
@@ -0,0 +1,65 @@
+SUMMARY = "U-Boot bootloader tools"
+DEPENDS += "openssl"
+
+PROVIDES = "${MLPREFIX}u-boot-mkimage ${MLPREFIX}u-boot-mkenvimage"
+PROVIDES_class-native = "u-boot-mkimage-native u-boot-mkenvimage-native"
+
+PACKAGES += "${PN}-mkimage ${PN}-mkenvimage"
+
+# Required for backward compatibility with "u-boot-mkimage-xxx.bb"
+RPROVIDES_${PN}-mkimage = "u-boot-mkimage"
+RREPLACES_${PN}-mkimage = "u-boot-mkimage"
+RCONFLICTS_${PN}-mkimage = "u-boot-mkimage"
+
+EXTRA_OEMAKE_class-target = 'CROSS_COMPILE="${TARGET_PREFIX}" CC="${CC} ${CFLAGS} ${LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
+EXTRA_OEMAKE_class-native = 'CC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
+EXTRA_OEMAKE_class-nativesdk = 'CROSS_COMPILE="${HOST_PREFIX}" CC="${CC} ${CFLAGS} ${LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
+
+SED_CONFIG_EFI = '-e "s/CONFIG_EFI_LOADER=.*/# CONFIG_EFI_LOADER is not set/"'
+SED_CONFIG_EFI_x86 = ''
+SED_CONFIG_EFI_x86-64 = ''
+SED_CONFIG_EFI_arm = ''
+SED_CONFIG_EFI_armeb = ''
+SED_CONFIG_EFI_aarch64 = ''
+
+do_compile () {
+ oe_runmake sandbox_defconfig
+
+ # Disable CONFIG_CMD_LICENSE, license.h is not used by tools and
+ # generating it requires bin2header tool, which for target build
+ # is built with target tools and thus cannot be executed on host.
+ sed -i -e "s/CONFIG_CMD_LICENSE=.*/# CONFIG_CMD_LICENSE is not set/" ${SED_CONFIG_EFI} .config
+
+ oe_runmake cross_tools NO_SDL=1
+}
+
+do_install () {
+ install -d ${D}${bindir}
+
+ # mkimage
+ install -m 0755 tools/mkimage ${D}${bindir}/uboot-mkimage
+ ln -sf uboot-mkimage ${D}${bindir}/mkimage
+
+ # mkenvimage
+ install -m 0755 tools/mkenvimage ${D}${bindir}/uboot-mkenvimage
+ ln -sf uboot-mkenvimage ${D}${bindir}/mkenvimage
+
+ # dumpimage
+ install -m 0755 tools/dumpimage ${D}${bindir}/uboot-dumpimage
+ ln -sf uboot-dumpimage ${D}${bindir}/dumpimage
+
+ # fit_check_sign
+ install -m 0755 tools/fit_check_sign ${D}${bindir}/uboot-fit_check_sign
+ ln -sf uboot-fit_check_sign ${D}${bindir}/fit_check_sign
+}
+
+ALLOW_EMPTY_${PN} = "1"
+FILES_${PN} = ""
+FILES_${PN}-mkimage = "${bindir}/uboot-mkimage ${bindir}/mkimage ${bindir}/uboot-dumpimage ${bindir}/dumpimage ${bindir}/uboot-fit_check_sign ${bindir}/fit_check_sign"
+FILES_${PN}-mkenvimage = "${bindir}/uboot-mkenvimage ${bindir}/mkenvimage"
+
+RDEPENDS_${PN}-mkimage += "dtc"
+RDEPENDS_${PN} += "${PN}-mkimage ${PN}-mkenvimage"
+RDEPENDS_${PN}_class-native = ""
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb b/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb
index bede984ef7..7eaf721ca8 100644
--- a/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb
+++ b/meta/recipes-bsp/u-boot/u-boot-tools_2019.07.bb
@@ -1,67 +1,2 @@
require u-boot-common.inc
-
-SUMMARY = "U-Boot bootloader tools"
-DEPENDS += "openssl"
-
-PROVIDES = "${MLPREFIX}u-boot-mkimage ${MLPREFIX}u-boot-mkenvimage"
-PROVIDES_class-native = "u-boot-mkimage-native u-boot-mkenvimage-native"
-
-PACKAGES += "${PN}-mkimage ${PN}-mkenvimage"
-
-# Required for backward compatibility with "u-boot-mkimage-xxx.bb"
-RPROVIDES_${PN}-mkimage = "u-boot-mkimage"
-RREPLACES_${PN}-mkimage = "u-boot-mkimage"
-RCONFLICTS_${PN}-mkimage = "u-boot-mkimage"
-
-EXTRA_OEMAKE_class-target = 'CROSS_COMPILE="${TARGET_PREFIX}" CC="${CC} ${CFLAGS} ${LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
-EXTRA_OEMAKE_class-native = 'CC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
-EXTRA_OEMAKE_class-nativesdk = 'CROSS_COMPILE="${HOST_PREFIX}" CC="${CC} ${CFLAGS} ${LDFLAGS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" STRIP=true V=1'
-
-SED_CONFIG_EFI = '-e "s/CONFIG_EFI_LOADER=.*/# CONFIG_EFI_LOADER is not set/"'
-SED_CONFIG_EFI_x86 = ''
-SED_CONFIG_EFI_x86-64 = ''
-SED_CONFIG_EFI_arm = ''
-SED_CONFIG_EFI_armeb = ''
-SED_CONFIG_EFI_aarch64 = ''
-
-do_compile () {
- oe_runmake sandbox_defconfig
-
- # Disable CONFIG_CMD_LICENSE, license.h is not used by tools and
- # generating it requires bin2header tool, which for target build
- # is built with target tools and thus cannot be executed on host.
- sed -i -e "s/CONFIG_CMD_LICENSE=.*/# CONFIG_CMD_LICENSE is not set/" ${SED_CONFIG_EFI} .config
-
- oe_runmake cross_tools NO_SDL=1
-}
-
-do_install () {
- install -d ${D}${bindir}
-
- # mkimage
- install -m 0755 tools/mkimage ${D}${bindir}/uboot-mkimage
- ln -sf uboot-mkimage ${D}${bindir}/mkimage
-
- # mkenvimage
- install -m 0755 tools/mkenvimage ${D}${bindir}/uboot-mkenvimage
- ln -sf uboot-mkenvimage ${D}${bindir}/mkenvimage
-
- # dumpimage
- install -m 0755 tools/dumpimage ${D}${bindir}/uboot-dumpimage
- ln -sf uboot-dumpimage ${D}${bindir}/dumpimage
-
- # fit_check_sign
- install -m 0755 tools/fit_check_sign ${D}${bindir}/uboot-fit_check_sign
- ln -sf uboot-fit_check_sign ${D}${bindir}/fit_check_sign
-}
-
-ALLOW_EMPTY_${PN} = "1"
-FILES_${PN} = ""
-FILES_${PN}-mkimage = "${bindir}/uboot-mkimage ${bindir}/mkimage ${bindir}/uboot-dumpimage ${bindir}/dumpimage ${bindir}/uboot-fit_check_sign ${bindir}/fit_check_sign"
-FILES_${PN}-mkenvimage = "${bindir}/uboot-mkenvimage ${bindir}/mkenvimage"
-
-RDEPENDS_${PN}-mkimage += "dtc"
-RDEPENDS_${PN} += "${PN}-mkimage ${PN}-mkenvimage"
-RDEPENDS_${PN}_class-native = ""
-
-BBCLASSEXTEND = "native nativesdk"
+require u-boot-tools.inc
diff --git a/meta/recipes-bsp/u-boot/u-boot.inc b/meta/recipes-bsp/u-boot/u-boot.inc
index 9a754fd09b..d241347bf7 100644
--- a/meta/recipes-bsp/u-boot/u-boot.inc
+++ b/meta/recipes-bsp/u-boot/u-boot.inc
@@ -87,6 +87,8 @@ do_configure () {
fi
merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
cml1_do_configure
+ else
+ DEVTOOL_DISABLE_MENUCONFIG=true
fi
}
diff --git a/meta/recipes-connectivity/avahi/avahi.inc b/meta/recipes-connectivity/avahi/avahi.inc
index 94fe6a16b6..6acedb5412 100644
--- a/meta/recipes-connectivity/avahi/avahi.inc
+++ b/meta/recipes-connectivity/avahi/avahi.inc
@@ -77,6 +77,11 @@ do_install() {
rm -rf ${D}${datadir}/dbus-1/interfaces
test -d ${D}${datadir}/dbus-1 && rmdir --ignore-fail-on-non-empty ${D}${datadir}/dbus-1
rm -rf ${D}${libdir}/avahi
+
+ # Move example service files out of /etc/avahi/services so we don't
+ # advertise ssh & sftp-ssh by default
+ install -d ${D}${docdir}/avahi
+ mv ${D}${sysconfdir}/avahi/services/* ${D}${docdir}/avahi
}
PACKAGES =+ "${@bb.utils.contains("PACKAGECONFIG", "libdns_sd", "libavahi-compat-libdnssd", "", d)}"
diff --git a/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch b/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
deleted file mode 100644
index 2fed99e1bb..0000000000
--- a/meta/recipes-connectivity/bind/bind/0001-bind-fix-CVE-2019-6471.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-Backport patch to fix CVE-2019-6471.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2019-6471
-
-CVE: CVE-2019-6471
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/3a9c7bb]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From 3a9c7bb80d4a609b86427406d9dd783199920b5b Mon Sep 17 00:00:00 2001
-From: Mark Andrews <marka@isc.org>
-Date: Tue, 19 Mar 2019 14:14:21 +1100
-Subject: [PATCH] move item_out test inside lock in dns_dispatch_getnext()
-
-(cherry picked from commit 60c42f849d520564ed42e5ed0ba46b4b69c07712)
----
- lib/dns/dispatch.c | 12 ++++++++----
- 1 file changed, 8 insertions(+), 4 deletions(-)
-
-diff --git a/lib/dns/dispatch.c b/lib/dns/dispatch.c
-index 408beda367..3278db4a07 100644
---- a/lib/dns/dispatch.c
-+++ b/lib/dns/dispatch.c
-@@ -134,7 +134,7 @@ struct dns_dispentry {
- isc_task_t *task;
- isc_taskaction_t action;
- void *arg;
-- bool item_out;
-+ bool item_out;
- dispsocket_t *dispsocket;
- ISC_LIST(dns_dispatchevent_t) items;
- ISC_LINK(dns_dispentry_t) link;
-@@ -3422,13 +3422,14 @@ dns_dispatch_getnext(dns_dispentry_t *resp, dns_dispatchevent_t **sockevent) {
- disp = resp->disp;
- REQUIRE(VALID_DISPATCH(disp));
-
-- REQUIRE(resp->item_out == true);
-- resp->item_out = false;
--
- ev = *sockevent;
- *sockevent = NULL;
-
- LOCK(&disp->lock);
-+
-+ REQUIRE(resp->item_out == true);
-+ resp->item_out = false;
-+
- if (ev->buffer.base != NULL)
- free_buffer(disp, ev->buffer.base, ev->buffer.length);
- free_devent(disp, ev);
-@@ -3573,6 +3574,9 @@ dns_dispatch_removeresponse(dns_dispentry_t **resp,
- isc_task_send(disp->task[0], &disp->ctlevent);
- }
-
-+/*
-+ * disp must be locked.
-+ */
- static void
- do_cancel(dns_dispatch_t *disp) {
- dns_dispatchevent_t *ev;
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch b/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch
index 871bb2a5f6..9d31b98080 100644
--- a/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch
+++ b/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch
@@ -1,4 +1,4 @@
-From 950867d9fd3f690e271c8c807b6eed144b2935b2 Mon Sep 17 00:00:00 2001
+From 2325a92f1896a2a7f586611686801b41fbc91b50 Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Mon, 27 Aug 2018 15:00:51 +0800
Subject: [PATCH] configure.in: remove useless `-L$use_openssl/lib'
@@ -10,15 +10,16 @@ and helpful for clean up host build path in isc-config.sh
Upstream-Status: Inappropriate [oe-core specific]
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+
---
- configure.in | 2 +-
+ configure.ac | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/configure.in b/configure.in
-index 54efc55..76ac0eb 100644
---- a/configure.in
-+++ b/configure.in
-@@ -1691,7 +1691,7 @@ If you don't want OpenSSL, use --without-openssl])
+diff --git a/configure.ac b/configure.ac
+index e85a5c6..2bbfc58 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1631,7 +1631,7 @@ If you don't want OpenSSL, use --without-openssl])
fi
;;
*)
@@ -27,6 +28,3 @@ index 54efc55..76ac0eb 100644
;;
esac
fi
---
-2.7.4
-
diff --git a/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch b/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
deleted file mode 100644
index 48ae125f84..0000000000
--- a/meta/recipes-connectivity/bind/bind/0001-fix-enforcement-of-tcp-clients-v1.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ec2d50d]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From ec2d50da8d81814640e28593d912f4b96c7efece Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
-Date: Thu, 3 Jan 2019 14:17:43 +0100
-Subject: [PATCH 1/6] fix enforcement of tcp-clients (v1)
-
-tcp-clients settings could be exceeded in some cases by
-creating more and more active TCP clients that are over
-the set quota limit, which in the end could lead to a
-DoS attack by e.g. exhaustion of file descriptors.
-
-If TCP client we're closing went over the quota (so it's
-not attached to a quota) mark it as mortal - so that it
-will be destroyed and not set up to listen for new
-connections - unless it's the last client for a specific
-interface.
-
-(cherry picked from commit f97131d21b97381cef72b971b157345c1f9b4115)
-(cherry picked from commit 9689ffc485df8f971f0ad81ab8ab1f5389493776)
----
- bin/named/client.c | 13 ++++++++++++-
- 1 file changed, 12 insertions(+), 1 deletion(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index d482da7121..0739dd48af 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -421,8 +421,19 @@ exit_check(ns_client_t *client) {
- isc_socket_detach(&client->tcpsocket);
- }
-
-- if (client->tcpquota != NULL)
-+ if (client->tcpquota != NULL) {
- isc_quota_detach(&client->tcpquota);
-+ } else {
-+ /*
-+ * We went over quota with this client, we don't
-+ * want to restart listening unless this is the
-+ * last client on this interface, which is
-+ * checked later.
-+ */
-+ if (TCP_CLIENT(client)) {
-+ client->mortal = true;
-+ }
-+ }
-
- if (client->timerset) {
- (void)isc_timer_reset(client->timer,
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch b/meta/recipes-connectivity/bind/bind/0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch
deleted file mode 100644
index a8d601dcaa..0000000000
--- a/meta/recipes-connectivity/bind/bind/0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-Upstream-Status: Pending
-
-Subject: gen.c: extend DIRNAMESIZE from 256 to 512
-
-Signed-off-by: Chen Qi <Qi.Chen@windriver.com>
----
- lib/dns/gen.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: bind-9.11.3/lib/dns/gen.c
-===================================================================
---- bind-9.11.3.orig/lib/dns/gen.c
-+++ bind-9.11.3/lib/dns/gen.c
-@@ -130,7 +130,7 @@ static const char copyright[] =
- #define TYPECLASSBUF (TYPECLASSLEN + 1)
- #define TYPECLASSFMT "%" STR(TYPECLASSLEN) "[-0-9a-z]_%d"
- #define ATTRIBUTESIZE 256
--#define DIRNAMESIZE 256
-+#define DIRNAMESIZE 512
-
- static struct cc {
- struct cc *next;
diff --git a/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch b/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch
deleted file mode 100644
index 01874a4407..0000000000
--- a/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 5bc3167a8b714ec0c4a3f1c7f3b9411296ec0a23 Mon Sep 17 00:00:00 2001
-From: Robert Yang <liezhi.yang@windriver.com>
-Date: Wed, 16 Sep 2015 20:23:47 -0700
-Subject: [PATCH] lib/dns/gen.c: fix too long error
-
-The 512 is a little short when build in deep dir, and cause "too long"
-error, use PATH_MAX if defined.
-
-Upstream-Status: Pending
-
-Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
----
- lib/dns/gen.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-Index: bind-9.11.3/lib/dns/gen.c
-===================================================================
---- bind-9.11.3.orig/lib/dns/gen.c
-+++ bind-9.11.3/lib/dns/gen.c
-@@ -130,7 +130,11 @@ static const char copyright[] =
- #define TYPECLASSBUF (TYPECLASSLEN + 1)
- #define TYPECLASSFMT "%" STR(TYPECLASSLEN) "[-0-9a-z]_%d"
- #define ATTRIBUTESIZE 256
-+#ifdef PATH_MAX
-+#define DIRNAMESIZE PATH_MAX
-+#else
- #define DIRNAMESIZE 512
-+#endif
-
- static struct cc {
- struct cc *next;
diff --git a/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch b/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
deleted file mode 100644
index ca4e8b1a66..0000000000
--- a/meta/recipes-connectivity/bind/bind/0002-tcp-clients-could-still-be-exceeded-v2.patch
+++ /dev/null
@@ -1,670 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org>
-Date: Fri, 4 Jan 2019 12:50:51 +0100
-Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2)
-
-the TCP client quota could still be ineffective under some
-circumstances. this change:
-
-- improves quota accounting to ensure that TCP clients are
- properly limited, while still guaranteeing that at least one client
- is always available to serve TCP connections on each interface.
-- uses more descriptive names and removes one (ntcptarget) that
- was no longer needed
-- adds comments
-
-(cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2)
-(cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396)
----
- bin/named/client.c | 311 ++++++++++++++++++++-----
- bin/named/include/named/client.h | 14 +-
- bin/named/include/named/interfacemgr.h | 11 +-
- bin/named/interfacemgr.c | 8 +-
- 4 files changed, 267 insertions(+), 77 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index 0739dd48af..a7b49a0f71 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
- static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- dns_dispatch_t *disp, bool tcp);
- static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
-- isc_socket_t *sock);
-+ isc_socket_t *sock, ns_client_t *oldclient);
- static inline bool
--allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr,
-- uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl);
-+allowed(isc_netaddr_t *addr, dns_name_t *signer,
-+ isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
-+ uint8_t *ecs_scope, dns_acl_t *acl)
- static void compute_cookie(ns_client_t *client, uint32_t when,
- uint32_t nonce, const unsigned char *secret,
- isc_buffer_t *buf);
-@@ -405,8 +406,11 @@ exit_check(ns_client_t *client) {
- */
- INSIST(client->recursionquota == NULL);
- INSIST(client->newstate <= NS_CLIENTSTATE_READY);
-- if (client->nreads > 0)
-+
-+ if (client->nreads > 0) {
- dns_tcpmsg_cancelread(&client->tcpmsg);
-+ }
-+
- if (client->nreads != 0) {
- /* Still waiting for read cancel completion. */
- return (true);
-@@ -416,25 +420,58 @@ exit_check(ns_client_t *client) {
- dns_tcpmsg_invalidate(&client->tcpmsg);
- client->tcpmsg_valid = false;
- }
-+
- if (client->tcpsocket != NULL) {
- CTRACE("closetcp");
- isc_socket_detach(&client->tcpsocket);
-+
-+ if (client->tcpactive) {
-+ LOCK(&client->interface->lock);
-+ INSIST(client->interface->ntcpactive > 0);
-+ client->interface->ntcpactive--;
-+ UNLOCK(&client->interface->lock);
-+ client->tcpactive = false;
-+ }
- }
-
- if (client->tcpquota != NULL) {
-- isc_quota_detach(&client->tcpquota);
-- } else {
- /*
-- * We went over quota with this client, we don't
-- * want to restart listening unless this is the
-- * last client on this interface, which is
-- * checked later.
-+ * If we are not in a pipeline group, or
-+ * we are the last client in the group, detach from
-+ * tcpquota; otherwise, transfer the quota to
-+ * another client in the same group.
- */
-- if (TCP_CLIENT(client)) {
-- client->mortal = true;
-+ if (!ISC_LINK_LINKED(client, glink) ||
-+ (client->glink.next == NULL &&
-+ client->glink.prev == NULL))
-+ {
-+ isc_quota_detach(&client->tcpquota);
-+ } else if (client->glink.next != NULL) {
-+ INSIST(client->glink.next->tcpquota == NULL);
-+ client->glink.next->tcpquota = client->tcpquota;
-+ client->tcpquota = NULL;
-+ } else {
-+ INSIST(client->glink.prev->tcpquota == NULL);
-+ client->glink.prev->tcpquota = client->tcpquota;
-+ client->tcpquota = NULL;
- }
- }
-
-+ /*
-+ * Unlink from pipeline group.
-+ */
-+ if (ISC_LINK_LINKED(client, glink)) {
-+ if (client->glink.next != NULL) {
-+ client->glink.next->glink.prev =
-+ client->glink.prev;
-+ }
-+ if (client->glink.prev != NULL) {
-+ client->glink.prev->glink.next =
-+ client->glink.next;
-+ }
-+ ISC_LINK_INIT(client, glink);
-+ }
-+
- if (client->timerset) {
- (void)isc_timer_reset(client->timer,
- isc_timertype_inactive,
-@@ -455,15 +492,16 @@ exit_check(ns_client_t *client) {
- * that already. Check whether this client needs to remain
- * active and force it to go inactive if not.
- *
-- * UDP clients go inactive at this point, but TCP clients
-- * may remain active if we have fewer active TCP client
-- * objects than desired due to an earlier quota exhaustion.
-+ * UDP clients go inactive at this point, but a TCP client
-+ * will needs to remain active if no other clients are
-+ * listening for TCP requests on this interface, to
-+ * prevent this interface from going nonresponsive.
- */
- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
- LOCK(&client->interface->lock);
-- if (client->interface->ntcpcurrent <
-- client->interface->ntcptarget)
-+ if (client->interface->ntcpaccepting == 0) {
- client->mortal = false;
-+ }
- UNLOCK(&client->interface->lock);
- }
-
-@@ -472,15 +510,17 @@ exit_check(ns_client_t *client) {
- * queue for recycling.
- */
- if (client->mortal) {
-- if (client->newstate > NS_CLIENTSTATE_INACTIVE)
-+ if (client->newstate > NS_CLIENTSTATE_INACTIVE) {
- client->newstate = NS_CLIENTSTATE_INACTIVE;
-+ }
- }
-
- if (NS_CLIENTSTATE_READY == client->newstate) {
- if (TCP_CLIENT(client)) {
- client_accept(client);
-- } else
-+ } else {
- client_udprecv(client);
-+ }
- client->newstate = NS_CLIENTSTATE_MAX;
- return (true);
- }
-@@ -492,41 +532,57 @@ exit_check(ns_client_t *client) {
- /*
- * We are trying to enter the inactive state.
- */
-- if (client->naccepts > 0)
-+ if (client->naccepts > 0) {
- isc_socket_cancel(client->tcplistener, client->task,
- ISC_SOCKCANCEL_ACCEPT);
-+ }
-
- /* Still waiting for accept cancel completion. */
-- if (! (client->naccepts == 0))
-+ if (! (client->naccepts == 0)) {
- return (true);
-+ }
-
- /* Accept cancel is complete. */
-- if (client->nrecvs > 0)
-+ if (client->nrecvs > 0) {
- isc_socket_cancel(client->udpsocket, client->task,
- ISC_SOCKCANCEL_RECV);
-+ }
-
- /* Still waiting for recv cancel completion. */
-- if (! (client->nrecvs == 0))
-+ if (! (client->nrecvs == 0)) {
- return (true);
-+ }
-
- /* Still waiting for control event to be delivered */
-- if (client->nctls > 0)
-+ if (client->nctls > 0) {
- return (true);
--
-- /* Deactivate the client. */
-- if (client->interface)
-- ns_interface_detach(&client->interface);
-+ }
-
- INSIST(client->naccepts == 0);
- INSIST(client->recursionquota == NULL);
-- if (client->tcplistener != NULL)
-+ if (client->tcplistener != NULL) {
- isc_socket_detach(&client->tcplistener);
-
-- if (client->udpsocket != NULL)
-+ if (client->tcpactive) {
-+ LOCK(&client->interface->lock);
-+ INSIST(client->interface->ntcpactive > 0);
-+ client->interface->ntcpactive--;
-+ UNLOCK(&client->interface->lock);
-+ client->tcpactive = false;
-+ }
-+ }
-+ if (client->udpsocket != NULL) {
- isc_socket_detach(&client->udpsocket);
-+ }
-
-- if (client->dispatch != NULL)
-+ /* Deactivate the client. */
-+ if (client->interface != NULL) {
-+ ns_interface_detach(&client->interface);
-+ }
-+
-+ if (client->dispatch != NULL) {
- dns_dispatch_detach(&client->dispatch);
-+ }
-
- client->attributes = 0;
- client->mortal = false;
-@@ -551,10 +607,13 @@ exit_check(ns_client_t *client) {
- client->newstate = NS_CLIENTSTATE_MAX;
- if (!ns_g_clienttest && manager != NULL &&
- !manager->exiting)
-+ {
- ISC_QUEUE_PUSH(manager->inactive, client,
- ilink);
-- if (client->needshutdown)
-+ }
-+ if (client->needshutdown) {
- isc_task_shutdown(client->task);
-+ }
- return (true);
- }
- }
-@@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) {
- }
- }
-
--
- /*%
- * The client's task has received a shutdown event.
- */
-@@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) {
- /*
- * Pipeline TCP query processing.
- */
-- if (client->message->opcode != dns_opcode_query)
-+ if (client->message->opcode != dns_opcode_query) {
- client->pipelined = false;
-+ }
- if (TCP_CLIENT(client) && client->pipelined) {
-- result = isc_quota_reserve(&ns_g_server->tcpquota);
-- if (result == ISC_R_SUCCESS)
-- result = ns_client_replace(client);
-+ result = ns_client_replace(client);
- if (result != ISC_R_SUCCESS) {
-- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
-- "no more TCP clients(read): %s",
-- isc_result_totext(result));
- client->pipelined = false;
- }
- }
-@@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- client->filter_aaaa = dns_aaaa_ok;
- #endif
- client->needshutdown = ns_g_clienttest;
-+ client->tcpactive = false;
-
- ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL,
- NS_EVENT_CLIENTCONTROL, client_start, client, client,
-@@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- client->formerrcache.id = 0;
- ISC_LINK_INIT(client, link);
- ISC_LINK_INIT(client, rlink);
-+ ISC_LINK_INIT(client, glink);
- ISC_QLINK_INIT(client, ilink);
- client->keytag = NULL;
- client->keytag_len = 0;
-@@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
-
- INSIST(client->state == NS_CLIENTSTATE_READY);
-
-+ /*
-+ * The accept() was successful and we're now establishing a new
-+ * connection. We need to make note of it in the client and
-+ * interface objects so client objects can do the right thing
-+ * when going inactive in exit_check() (see comments in
-+ * client_accept() for details).
-+ */
- INSIST(client->naccepts == 1);
- client->naccepts--;
-
- LOCK(&client->interface->lock);
-- INSIST(client->interface->ntcpcurrent > 0);
-- client->interface->ntcpcurrent--;
-+ INSIST(client->interface->ntcpaccepting > 0);
-+ client->interface->ntcpaccepting--;
- UNLOCK(&client->interface->lock);
-
- /*
-@@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
- "accept failed: %s",
- isc_result_totext(nevent->result));
-+ if (client->tcpquota != NULL) {
-+ isc_quota_detach(&client->tcpquota);
-+ }
- }
-
- if (exit_check(client))
-@@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- * deny service to legitimate TCP clients.
- */
- client->pipelined = false;
-- result = isc_quota_attach(&ns_g_server->tcpquota,
-- &client->tcpquota);
-- if (result == ISC_R_SUCCESS)
-- result = ns_client_replace(client);
-- if (result != ISC_R_SUCCESS) {
-- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-- NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
-- "no more TCP clients(accept): %s",
-- isc_result_totext(result));
-- } else if (ns_g_server->keepresporder == NULL ||
-- !allowed(&netaddr, NULL, NULL, 0, NULL,
-- ns_g_server->keepresporder)) {
-+ result = ns_client_replace(client);
-+ if (result == ISC_R_SUCCESS &&
-+ (client->sctx->keepresporder == NULL ||
-+ !allowed(&netaddr, NULL, NULL, 0, NULL,
-+ ns_g_server->keepresporder)))
-+ {
- client->pipelined = true;
- }
-
-@@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) {
-
- CTRACE("accept");
-
-+ /*
-+ * The tcpquota object can only be simultaneously referenced a
-+ * pre-defined number of times; this is configured by 'tcp-clients'
-+ * in named.conf. If we can't attach to it here, that means the TCP
-+ * client quota has been exceeded.
-+ */
-+ result = isc_quota_attach(&client->sctx->tcpquota,
-+ &client->tcpquota);
-+ if (result != ISC_R_SUCCESS) {
-+ bool exit;
-+
-+ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-+ NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
-+ "no more TCP clients: %s",
-+ isc_result_totext(result));
-+
-+ /*
-+ * We have exceeded the system-wide TCP client
-+ * quota. But, we can't just block this accept
-+ * in all cases, because if we did, a heavy TCP
-+ * load on other interfaces might cause this
-+ * interface to be starved, with no clients able
-+ * to accept new connections.
-+ *
-+ * So, we check here to see if any other client
-+ * is already servicing TCP queries on this
-+ * interface (whether accepting, reading, or
-+ * processing).
-+ *
-+ * If so, then it's okay *not* to call
-+ * accept - we can let this client to go inactive
-+ * and the other one handle the next connection
-+ * when it's ready.
-+ *
-+ * But if not, then we need to be a little bit
-+ * flexible about the quota. We allow *one* extra
-+ * TCP client through, to ensure we're listening on
-+ * every interface.
-+ *
-+ * (Note: In practice this means that the *real*
-+ * TCP client quota is tcp-clients plus the number
-+ * of interfaces.)
-+ */
-+ LOCK(&client->interface->lock);
-+ exit = (client->interface->ntcpactive > 0);
-+ UNLOCK(&client->interface->lock);
-+
-+ if (exit) {
-+ client->newstate = NS_CLIENTSTATE_INACTIVE;
-+ (void)exit_check(client);
-+ return;
-+ }
-+ }
-+
-+ /*
-+ * By incrementing the interface's ntcpactive counter we signal
-+ * that there is at least one client servicing TCP queries for the
-+ * interface.
-+ *
-+ * We also make note of the fact in the client itself with the
-+ * tcpactive flag. This ensures proper accounting by preventing
-+ * us from accidentally incrementing or decrementing ntcpactive
-+ * more than once per client object.
-+ */
-+ if (!client->tcpactive) {
-+ LOCK(&client->interface->lock);
-+ client->interface->ntcpactive++;
-+ UNLOCK(&client->interface->lock);
-+ client->tcpactive = true;
-+ }
-+
- result = isc_socket_accept(client->tcplistener, client->task,
- client_newconn, client);
- if (result != ISC_R_SUCCESS) {
-- UNEXPECTED_ERROR(__FILE__, __LINE__,
-- "isc_socket_accept() failed: %s",
-- isc_result_totext(result));
- /*
- * XXXRTH What should we do? We're trying to accept but
- * it didn't work. If we just give up, then TCP
-@@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) {
- *
- * For now, we just go idle.
- */
-+ UNEXPECTED_ERROR(__FILE__, __LINE__,
-+ "isc_socket_accept() failed: %s",
-+ isc_result_totext(result));
-+ if (client->tcpquota != NULL) {
-+ isc_quota_detach(&client->tcpquota);
-+ }
- return;
- }
-+
-+ /*
-+ * The client's 'naccepts' counter indicates that this client has
-+ * called accept() and is waiting for a new connection. It should
-+ * never exceed 1.
-+ */
- INSIST(client->naccepts == 0);
- client->naccepts++;
-+
-+ /*
-+ * The interface's 'ntcpaccepting' counter is incremented when
-+ * any client calls accept(), and decremented in client_newconn()
-+ * once the connection is established.
-+ *
-+ * When the client object is shutting down after handling a TCP
-+ * request (see exit_check()), it looks to see whether this value is
-+ * non-zero. If so, that means another client has already called
-+ * accept() and is waiting to establish the next connection, which
-+ * means the first client is free to go inactive. Otherwise,
-+ * the first client must come back and call accept() again; this
-+ * guarantees there will always be at least one client listening
-+ * for new TCP connections on each interface.
-+ */
- LOCK(&client->interface->lock);
-- client->interface->ntcpcurrent++;
-+ client->interface->ntcpaccepting++;
- UNLOCK(&client->interface->lock);
- }
-
-@@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) {
- tcp = TCP_CLIENT(client);
- if (tcp && client->pipelined) {
- result = get_worker(client->manager, client->interface,
-- client->tcpsocket);
-+ client->tcpsocket, client);
- } else {
- result = get_client(client->manager, client->interface,
- client->dispatch, tcp);
- }
-- if (result != ISC_R_SUCCESS)
-+ if (result != ISC_R_SUCCESS) {
- return (result);
-+ }
-
- /*
- * The responsibility for listening for new requests is hereby
-@@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- client->attributes |= NS_CLIENTATTR_TCP;
- isc_socket_attach(ifp->tcpsocket,
- &client->tcplistener);
-+
- } else {
- isc_socket_t *sock;
-
-@@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- }
-
- static isc_result_t
--get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
-+get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
-+ ns_client_t *oldclient)
- {
- isc_result_t result = ISC_R_SUCCESS;
- isc_event_t *ev;
-@@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
- MTRACE("get worker");
-
- REQUIRE(manager != NULL);
-+ REQUIRE(oldclient != NULL);
-
- if (manager->exiting)
- return (ISC_R_SHUTTINGDOWN);
-@@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
- ns_interface_attach(ifp, &client->interface);
- client->newstate = client->state = NS_CLIENTSTATE_WORKING;
- INSIST(client->recursionquota == NULL);
-- client->tcpquota = &ns_g_server->tcpquota;
-+
-+ /*
-+ * Transfer TCP quota to the new client.
-+ */
-+ INSIST(client->tcpquota == NULL);
-+ INSIST(oldclient->tcpquota != NULL);
-+ client->tcpquota = oldclient->tcpquota;
-+ oldclient->tcpquota = NULL;
-+
-+ /*
-+ * Link to a pipeline group, creating it if needed.
-+ */
-+ if (!ISC_LINK_LINKED(oldclient, glink)) {
-+ oldclient->glink.next = NULL;
-+ oldclient->glink.prev = NULL;
-+ }
-+ client->glink.next = oldclient->glink.next;
-+ client->glink.prev = oldclient;
-+ if (oldclient->glink.next != NULL) {
-+ oldclient->glink.next->glink.prev = client;
-+ }
-+ oldclient->glink.next = client;
-
- client->dscp = ifp->dscp;
-
-@@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock)
- (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
- client->peeraddr_valid = true;
-
-+ LOCK(&client->interface->lock);
-+ client->interface->ntcpactive++;
-+ UNLOCK(&client->interface->lock);
-+
-+ client->tcpactive = true;
-+
- INSIST(client->tcpmsg_valid == false);
- dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
- client->tcpmsg_valid = true;
-diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
-index b23a7b191d..1f7973f9c5 100644
---- a/bin/named/include/named/client.h
-+++ b/bin/named/include/named/client.h
-@@ -94,7 +94,8 @@ struct ns_client {
- int nupdates;
- int nctls;
- int references;
-- bool needshutdown; /*
-+ bool tcpactive;
-+ bool needshutdown; /*
- * Used by clienttest to get
- * the client to go from
- * inactive to free state
-@@ -130,9 +131,9 @@ struct ns_client {
- isc_stdtime_t now;
- isc_time_t tnow;
- dns_name_t signername; /*%< [T]SIG key name */
-- dns_name_t * signer; /*%< NULL if not valid sig */
-- bool mortal; /*%< Die after handling request */
-- bool pipelined; /*%< TCP queries not in sequence */
-+ dns_name_t *signer; /*%< NULL if not valid sig */
-+ bool mortal; /*%< Die after handling request */
-+ bool pipelined; /*%< TCP queries not in sequence */
- isc_quota_t *tcpquota;
- isc_quota_t *recursionquota;
- ns_interface_t *interface;
-@@ -143,8 +144,8 @@ struct ns_client {
- isc_sockaddr_t destsockaddr;
-
- isc_netaddr_t ecs_addr; /*%< EDNS client subnet */
-- uint8_t ecs_addrlen;
-- uint8_t ecs_scope;
-+ uint8_t ecs_addrlen;
-+ uint8_t ecs_scope;
-
- struct in6_pktinfo pktinfo;
- isc_dscp_t dscp;
-@@ -166,6 +167,7 @@ struct ns_client {
-
- ISC_LINK(ns_client_t) link;
- ISC_LINK(ns_client_t) rlink;
-+ ISC_LINK(ns_client_t) glink;
- ISC_QLINK(ns_client_t) ilink;
- unsigned char cookie[8];
- uint32_t expire;
-diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
-index 7d1883e1e8..61b08826a6 100644
---- a/bin/named/include/named/interfacemgr.h
-+++ b/bin/named/include/named/interfacemgr.h
-@@ -77,9 +77,14 @@ struct ns_interface {
- /*%< UDP dispatchers. */
- isc_socket_t * tcpsocket; /*%< TCP socket. */
- isc_dscp_t dscp; /*%< "listen-on" DSCP value */
-- int ntcptarget; /*%< Desired number of concurrent
-- TCP accepts */
-- int ntcpcurrent; /*%< Current ditto, locked */
-+ int ntcpaccepting; /*%< Number of clients
-+ ready to accept new
-+ TCP connections on this
-+ interface */
-+ int ntcpactive; /*%< Number of clients
-+ servicing TCP queries
-+ (whether accepting or
-+ connected) */
- int nudpdispatch; /*%< Number of UDP dispatches */
- ns_clientmgr_t * clientmgr; /*%< Client manager. */
- ISC_LINK(ns_interface_t) link;
-diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
-index 419927bf54..955096ef47 100644
---- a/bin/named/interfacemgr.c
-+++ b/bin/named/interfacemgr.c
-@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
- * connections will be handled in parallel even though there is
- * only one client initially.
- */
-- ifp->ntcptarget = 1;
-- ifp->ntcpcurrent = 0;
-+ ifp->ntcpaccepting = 0;
-+ ifp->ntcpactive = 0;
- ifp->nudpdispatch = 0;
-
- ifp->dscp = -1;
-@@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) {
- */
- (void)isc_socket_filter(ifp->tcpsocket, "dataready");
-
-- result = ns_clientmgr_createclients(ifp->clientmgr,
-- ifp->ntcptarget, ifp,
-- true);
-+ result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true);
- if (result != ISC_R_SUCCESS) {
- UNEXPECTED_ERROR(__FILE__, __LINE__,
- "TCP ns_clientmgr_createclients(): %s",
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch b/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
deleted file mode 100644
index 032cfb8c44..0000000000
--- a/meta/recipes-connectivity/bind/bind/0003-use-reference-counter-for-pipeline-groups-v3.patch
+++ /dev/null
@@ -1,278 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/366b4e1]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From 366b4e1ede8aed690e981e07137cb1cb77879c36 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
-Date: Thu, 17 Jan 2019 15:53:38 +0100
-Subject: [PATCH 3/6] use reference counter for pipeline groups (v3)
-
-Track pipeline groups using a shared reference counter
-instead of a linked list.
-
-(cherry picked from commit 513afd33eb17d5dc41a3f0d2d38204ef8c5f6f91)
-(cherry picked from commit 9446629b730c59c4215f08d37fbaf810282fbccb)
----
- bin/named/client.c | 171 ++++++++++++++++++++-----------
- bin/named/include/named/client.h | 2 +-
- 2 files changed, 110 insertions(+), 63 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index a7b49a0f71..277656cef0 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -299,6 +299,75 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
- }
- }
-
-+/*%
-+ * Allocate a reference counter that will track the number of client structures
-+ * using the TCP connection that 'client' called accept() for. This counter
-+ * will be shared between all client structures associated with this TCP
-+ * connection.
-+ */
-+static void
-+pipeline_init(ns_client_t *client) {
-+ isc_refcount_t *refs;
-+
-+ REQUIRE(client->pipeline_refs == NULL);
-+
-+ /*
-+ * A global memory context is used for the allocation as different
-+ * client structures may have different memory contexts assigned and a
-+ * reference counter allocated here might need to be freed by a
-+ * different client. The performance impact caused by memory context
-+ * contention here is expected to be negligible, given that this code
-+ * is only executed for TCP connections.
-+ */
-+ refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
-+ isc_refcount_init(refs, 1);
-+ client->pipeline_refs = refs;
-+}
-+
-+/*%
-+ * Increase the count of client structures using the TCP connection that
-+ * 'source' is associated with and put a pointer to that count in 'target',
-+ * thus associating it with the same TCP connection.
-+ */
-+static void
-+pipeline_attach(ns_client_t *source, ns_client_t *target) {
-+ int old_refs;
-+
-+ REQUIRE(source->pipeline_refs != NULL);
-+ REQUIRE(target->pipeline_refs == NULL);
-+
-+ old_refs = isc_refcount_increment(source->pipeline_refs);
-+ INSIST(old_refs > 0);
-+ target->pipeline_refs = source->pipeline_refs;
-+}
-+
-+/*%
-+ * Decrease the count of client structures using the TCP connection that
-+ * 'client' is associated with. If this is the last client using this TCP
-+ * connection, free the reference counter and return true; otherwise, return
-+ * false.
-+ */
-+static bool
-+pipeline_detach(ns_client_t *client) {
-+ isc_refcount_t *refs;
-+ int old_refs;
-+
-+ REQUIRE(client->pipeline_refs != NULL);
-+
-+ refs = client->pipeline_refs;
-+ client->pipeline_refs = NULL;
-+
-+ old_refs = isc_refcount_decrement(refs);
-+ INSIST(old_refs > 0);
-+
-+ if (old_refs == 1) {
-+ isc_mem_free(client->sctx->mctx, refs);
-+ return (true);
-+ }
-+
-+ return (false);
-+}
-+
- /*%
- * Check for a deactivation or shutdown request and take appropriate
- * action. Returns true if either is in progress; in this case
-@@ -421,6 +490,40 @@ exit_check(ns_client_t *client) {
- client->tcpmsg_valid = false;
- }
-
-+ if (client->tcpquota != NULL) {
-+ if (client->pipeline_refs == NULL ||
-+ pipeline_detach(client))
-+ {
-+ /*
-+ * Only detach from the TCP client quota if
-+ * there are no more client structures using
-+ * this TCP connection.
-+ *
-+ * Note that we check 'pipeline_refs' and not
-+ * 'pipelined' because in some cases (e.g.
-+ * after receiving a request with an opcode
-+ * different than QUERY) 'pipelined' is set to
-+ * false after the reference counter gets
-+ * allocated in pipeline_init() and we must
-+ * still drop our reference as failing to do so
-+ * would prevent the reference counter itself
-+ * from being freed.
-+ */
-+ isc_quota_detach(&client->tcpquota);
-+ } else {
-+ /*
-+ * There are other client structures using this
-+ * TCP connection, so we cannot detach from the
-+ * TCP client quota to prevent excess TCP
-+ * connections from being accepted. However,
-+ * this client structure might later be reused
-+ * for accepting new connections and thus must
-+ * have its 'tcpquota' field set to NULL.
-+ */
-+ client->tcpquota = NULL;
-+ }
-+ }
-+
- if (client->tcpsocket != NULL) {
- CTRACE("closetcp");
- isc_socket_detach(&client->tcpsocket);
-@@ -434,44 +537,6 @@ exit_check(ns_client_t *client) {
- }
- }
-
-- if (client->tcpquota != NULL) {
-- /*
-- * If we are not in a pipeline group, or
-- * we are the last client in the group, detach from
-- * tcpquota; otherwise, transfer the quota to
-- * another client in the same group.
-- */
-- if (!ISC_LINK_LINKED(client, glink) ||
-- (client->glink.next == NULL &&
-- client->glink.prev == NULL))
-- {
-- isc_quota_detach(&client->tcpquota);
-- } else if (client->glink.next != NULL) {
-- INSIST(client->glink.next->tcpquota == NULL);
-- client->glink.next->tcpquota = client->tcpquota;
-- client->tcpquota = NULL;
-- } else {
-- INSIST(client->glink.prev->tcpquota == NULL);
-- client->glink.prev->tcpquota = client->tcpquota;
-- client->tcpquota = NULL;
-- }
-- }
--
-- /*
-- * Unlink from pipeline group.
-- */
-- if (ISC_LINK_LINKED(client, glink)) {
-- if (client->glink.next != NULL) {
-- client->glink.next->glink.prev =
-- client->glink.prev;
-- }
-- if (client->glink.prev != NULL) {
-- client->glink.prev->glink.next =
-- client->glink.next;
-- }
-- ISC_LINK_INIT(client, glink);
-- }
--
- if (client->timerset) {
- (void)isc_timer_reset(client->timer,
- isc_timertype_inactive,
-@@ -3130,6 +3195,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- dns_name_init(&client->signername, NULL);
- client->mortal = false;
- client->pipelined = false;
-+ client->pipeline_refs = NULL;
- client->tcpquota = NULL;
- client->recursionquota = NULL;
- client->interface = NULL;
-@@ -3154,7 +3220,6 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- client->formerrcache.id = 0;
- ISC_LINK_INIT(client, link);
- ISC_LINK_INIT(client, rlink);
-- ISC_LINK_INIT(client, glink);
- ISC_QLINK_INIT(client, ilink);
- client->keytag = NULL;
- client->keytag_len = 0;
-@@ -3341,6 +3406,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- !allowed(&netaddr, NULL, NULL, 0, NULL,
- ns_g_server->keepresporder)))
- {
-+ pipeline_init(client);
- client->pipelined = true;
- }
-
-@@ -3800,35 +3866,16 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
- ns_interface_attach(ifp, &client->interface);
- client->newstate = client->state = NS_CLIENTSTATE_WORKING;
- INSIST(client->recursionquota == NULL);
--
-- /*
-- * Transfer TCP quota to the new client.
-- */
-- INSIST(client->tcpquota == NULL);
-- INSIST(oldclient->tcpquota != NULL);
-- client->tcpquota = oldclient->tcpquota;
-- oldclient->tcpquota = NULL;
--
-- /*
-- * Link to a pipeline group, creating it if needed.
-- */
-- if (!ISC_LINK_LINKED(oldclient, glink)) {
-- oldclient->glink.next = NULL;
-- oldclient->glink.prev = NULL;
-- }
-- client->glink.next = oldclient->glink.next;
-- client->glink.prev = oldclient;
-- if (oldclient->glink.next != NULL) {
-- oldclient->glink.next->glink.prev = client;
-- }
-- oldclient->glink.next = client;
-+ client->tcpquota = &client->sctx->tcpquota;
-
- client->dscp = ifp->dscp;
-
- client->attributes |= NS_CLIENTATTR_TCP;
-- client->pipelined = true;
- client->mortal = true;
-
-+ pipeline_attach(oldclient, client);
-+ client->pipelined = true;
-+
- isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
- isc_socket_attach(sock, &client->tcpsocket);
- isc_socket_setname(client->tcpsocket, "worker-tcp", NULL);
-diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
-index 1f7973f9c5..aeed9ccdda 100644
---- a/bin/named/include/named/client.h
-+++ b/bin/named/include/named/client.h
-@@ -134,6 +134,7 @@ struct ns_client {
- dns_name_t *signer; /*%< NULL if not valid sig */
- bool mortal; /*%< Die after handling request */
- bool pipelined; /*%< TCP queries not in sequence */
-+ isc_refcount_t *pipeline_refs;
- isc_quota_t *tcpquota;
- isc_quota_t *recursionquota;
- ns_interface_t *interface;
-@@ -167,7 +168,6 @@ struct ns_client {
-
- ISC_LINK(ns_client_t) link;
- ISC_LINK(ns_client_t) rlink;
-- ISC_LINK(ns_client_t) glink;
- ISC_QLINK(ns_client_t) ilink;
- unsigned char cookie[8];
- uint32_t expire;
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch b/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
deleted file mode 100644
index 034ab13303..0000000000
--- a/meta/recipes-connectivity/bind/bind/0004-better-tcpquota-accounting-and-client-mortality-chec.patch
+++ /dev/null
@@ -1,512 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001
-From: Evan Hunt <each@isc.org>
-Date: Fri, 5 Apr 2019 16:12:18 -0700
-Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks
-
-- ensure that tcpactive is cleaned up correctly when accept() fails.
-- set 'client->tcpattached' when the client is attached to the tcpquota.
- carry this value on to new clients sharing the same pipeline group.
- don't call isc_quota_detach() on the tcpquota unless tcpattached is
- set. this way clients that were allowed to accept TCP connections
- despite being over quota (and therefore, were never attached to the
- quota) will not inadvertently detach from it and mess up the
- accounting.
-- simplify the code for tcpquota disconnection by using a new function
- tcpquota_disconnect().
-- before deciding whether to reject a new connection due to quota
- exhaustion, check to see whether there are at least two active
- clients. previously, this was "at least one", but that could be
- insufficient if there was one other client in READING state (waiting
- for messages on an open connection) but none in READY (listening
- for new connections).
-- before deciding whether a TCP client object can to go inactive, we
- must ensure there are enough other clients to maintain service
- afterward -- both accepting new connections and reading/processing new
- queries. A TCP client can't shut down unless at least one
- client is accepting new connections and (in the case of pipelined
- clients) at least one additional client is waiting to read.
-
-(cherry picked from commit c7394738b2445c16f728a88394864dd61baad900)
-(cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856)
-(cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3)
----
- bin/named/client.c | 244 +++++++++++++++++++------------
- bin/named/include/named/client.h | 3 +-
- 2 files changed, 152 insertions(+), 95 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index 277656cef0..61e96dd28c 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event);
- static void client_request(isc_task_t *task, isc_event_t *event);
- static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
- static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-- dns_dispatch_t *disp, bool tcp);
-+ dns_dispatch_t *disp, ns_client_t *oldclient,
-+ bool tcp);
- static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
- isc_socket_t *sock, ns_client_t *oldclient);
- static inline bool
- allowed(isc_netaddr_t *addr, dns_name_t *signer,
- isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
-- uint8_t *ecs_scope, dns_acl_t *acl)
-+ uint8_t *ecs_scope, dns_acl_t *acl);
- static void compute_cookie(ns_client_t *client, uint32_t when,
- uint32_t nonce, const unsigned char *secret,
- isc_buffer_t *buf);
-@@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) {
- * contention here is expected to be negligible, given that this code
- * is only executed for TCP connections.
- */
-- refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
-+ refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
- isc_refcount_init(refs, 1);
- client->pipeline_refs = refs;
- }
-@@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) {
- */
- static void
- pipeline_attach(ns_client_t *source, ns_client_t *target) {
-- int old_refs;
-+ int refs;
-
- REQUIRE(source->pipeline_refs != NULL);
- REQUIRE(target->pipeline_refs == NULL);
-
-- old_refs = isc_refcount_increment(source->pipeline_refs);
-- INSIST(old_refs > 0);
-+ isc_refcount_increment(source->pipeline_refs, &refs);
-+ INSIST(refs > 1);
- target->pipeline_refs = source->pipeline_refs;
- }
-
-@@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) {
- */
- static bool
- pipeline_detach(ns_client_t *client) {
-- isc_refcount_t *refs;
-- int old_refs;
-+ isc_refcount_t *refcount;
-+ int refs;
-
- REQUIRE(client->pipeline_refs != NULL);
-
-- refs = client->pipeline_refs;
-+ refcount = client->pipeline_refs;
- client->pipeline_refs = NULL;
-
-- old_refs = isc_refcount_decrement(refs);
-- INSIST(old_refs > 0);
-+ isc_refcount_decrement(refcount, refs);
-
-- if (old_refs == 1) {
-- isc_mem_free(client->sctx->mctx, refs);
-+ if (refs == 0) {
-+ isc_mem_free(ns_g_mctx, refs);
- return (true);
- }
-
- return (false);
- }
-
-+/*
-+ * Detach a client from the TCP client quota if appropriate, and set
-+ * the quota pointer to NULL.
-+ *
-+ * Sometimes when the TCP client quota is exhausted but there are no other
-+ * clients servicing the interface, a client will be allowed to continue
-+ * running despite not having been attached to the quota. In this event,
-+ * the TCP quota was never attached to the client, so when the client (or
-+ * associated pipeline group) shuts down, the quota must NOT be detached.
-+ *
-+ * Otherwise, if the quota pointer is set, it should be detached. If not
-+ * set at all, we just return without doing anything.
-+ */
-+static void
-+tcpquota_disconnect(ns_client_t *client) {
-+ if (client->tcpquota == NULL) {
-+ return;
-+ }
-+
-+ if (client->tcpattached) {
-+ isc_quota_detach(&client->tcpquota);
-+ client->tcpattached = false;
-+ } else {
-+ client->tcpquota = NULL;
-+ }
-+}
-+
- /*%
- * Check for a deactivation or shutdown request and take appropriate
- * action. Returns true if either is in progress; in this case
-@@ -490,38 +517,31 @@ exit_check(ns_client_t *client) {
- client->tcpmsg_valid = false;
- }
-
-- if (client->tcpquota != NULL) {
-- if (client->pipeline_refs == NULL ||
-- pipeline_detach(client))
-- {
-- /*
-- * Only detach from the TCP client quota if
-- * there are no more client structures using
-- * this TCP connection.
-- *
-- * Note that we check 'pipeline_refs' and not
-- * 'pipelined' because in some cases (e.g.
-- * after receiving a request with an opcode
-- * different than QUERY) 'pipelined' is set to
-- * false after the reference counter gets
-- * allocated in pipeline_init() and we must
-- * still drop our reference as failing to do so
-- * would prevent the reference counter itself
-- * from being freed.
-- */
-- isc_quota_detach(&client->tcpquota);
-- } else {
-- /*
-- * There are other client structures using this
-- * TCP connection, so we cannot detach from the
-- * TCP client quota to prevent excess TCP
-- * connections from being accepted. However,
-- * this client structure might later be reused
-- * for accepting new connections and thus must
-- * have its 'tcpquota' field set to NULL.
-- */
-- client->tcpquota = NULL;
-- }
-+ /*
-+ * Detach from pipeline group and from TCP client quota,
-+ * if appropriate.
-+ *
-+ * - If no pipeline group is active, attempt to
-+ * detach from the TCP client quota.
-+ *
-+ * - If a pipeline group is active, detach from it;
-+ * if the return code indicates that there no more
-+ * clients left if this pipeline group, we also detach
-+ * from the TCP client quota.
-+ *
-+ * - Otherwise we don't try to detach, we just set the
-+ * TCP quota pointer to NULL if it wasn't NULL already.
-+ *
-+ * tcpquota_disconnect() will set tcpquota to NULL, either
-+ * by detaching it or by assignment, depending on the
-+ * needs of the client. See the comments on that function
-+ * for further information.
-+ */
-+ if (client->pipeline_refs == NULL || pipeline_detach(client)) {
-+ tcpquota_disconnect(client);
-+ } else {
-+ client->tcpquota = NULL;
-+ client->tcpattached = false;
- }
-
- if (client->tcpsocket != NULL) {
-@@ -544,8 +564,6 @@ exit_check(ns_client_t *client) {
- client->timerset = false;
- }
-
-- client->pipelined = false;
--
- client->peeraddr_valid = false;
-
- client->state = NS_CLIENTSTATE_READY;
-@@ -558,18 +576,27 @@ exit_check(ns_client_t *client) {
- * active and force it to go inactive if not.
- *
- * UDP clients go inactive at this point, but a TCP client
-- * will needs to remain active if no other clients are
-- * listening for TCP requests on this interface, to
-- * prevent this interface from going nonresponsive.
-+ * may need to remain active and go into ready state if
-+ * no other clients are available to listen for TCP
-+ * requests on this interface or (in the case of pipelined
-+ * clients) to read for additional messages on the current
-+ * connection.
- */
- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
- LOCK(&client->interface->lock);
-- if (client->interface->ntcpaccepting == 0) {
-+ if ((client->interface->ntcpaccepting == 0 ||
-+ (client->pipelined &&
-+ client->interface->ntcpactive < 2)) &&
-+ client->newstate != NS_CLIENTSTATE_FREED)
-+ {
- client->mortal = false;
-+ client->newstate = NS_CLIENTSTATE_READY;
- }
- UNLOCK(&client->interface->lock);
- }
-
-+ client->pipelined = false;
-+
- /*
- * We don't need the client; send it to the inactive
- * queue for recycling.
-@@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) {
- client->pipelined = false;
- }
- if (TCP_CLIENT(client) && client->pipelined) {
-+ /*
-+ * We're pipelining. Replace the client; the
-+ * the replacement can read the TCP socket looking
-+ * for new messages and this client can process the
-+ * current message asynchronously.
-+ *
-+ * There are now at least three clients using this
-+ * TCP socket - one accepting new connections,
-+ * one reading an existing connection to get new
-+ * messages, and one answering the message already
-+ * received.
-+ */
- result = ns_client_replace(client);
- if (result != ISC_R_SUCCESS) {
- client->pipelined = false;
-@@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- client->pipelined = false;
- client->pipeline_refs = NULL;
- client->tcpquota = NULL;
-+ client->tcpattached = false;
- client->recursionquota = NULL;
- client->interface = NULL;
- client->peeraddr_valid = false;
-@@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
- "accept failed: %s",
- isc_result_totext(nevent->result));
-- if (client->tcpquota != NULL) {
-- isc_quota_detach(&client->tcpquota);
-- }
-+ tcpquota_disconnect(client);
- }
-
- if (exit_check(client))
-@@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- client->pipelined = false;
- result = ns_client_replace(client);
- if (result == ISC_R_SUCCESS &&
-- (client->sctx->keepresporder == NULL ||
-+ (ns_g_server->keepresporder == NULL ||
- !allowed(&netaddr, NULL, NULL, 0, NULL,
- ns_g_server->keepresporder)))
- {
-@@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) {
- * in named.conf. If we can't attach to it here, that means the TCP
- * client quota has been exceeded.
- */
-- result = isc_quota_attach(&client->sctx->tcpquota,
-+ result = isc_quota_attach(&ns_g_server->tcpquota,
- &client->tcpquota);
- if (result != ISC_R_SUCCESS) {
- bool exit;
-@@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) {
- * interface to be starved, with no clients able
- * to accept new connections.
- *
-- * So, we check here to see if any other client
-- * is already servicing TCP queries on this
-+ * So, we check here to see if any other clients
-+ * are already servicing TCP queries on this
- * interface (whether accepting, reading, or
-- * processing).
-- *
-- * If so, then it's okay *not* to call
-- * accept - we can let this client to go inactive
-- * and the other one handle the next connection
-- * when it's ready.
-+ * processing). If there are at least two
-+ * (one reading and one processing a request)
-+ * then it's okay *not* to call accept - we
-+ * can let this client go inactive and another
-+ * one will resume accepting when it's done.
- *
-- * But if not, then we need to be a little bit
-- * flexible about the quota. We allow *one* extra
-- * TCP client through, to ensure we're listening on
-- * every interface.
-+ * If there aren't enough active clients on the
-+ * interface, then we can be a little bit
-+ * flexible about the quota. We'll allow *one*
-+ * extra client through to ensure we're listening
-+ * on every interface.
- *
-- * (Note: In practice this means that the *real*
-- * TCP client quota is tcp-clients plus the number
-- * of interfaces.)
-+ * (Note: In practice this means that the real
-+ * TCP client quota is tcp-clients plus the
-+ * number of listening interfaces plus 2.)
- */
- LOCK(&client->interface->lock);
-- exit = (client->interface->ntcpactive > 0);
-+ exit = (client->interface->ntcpactive > 1);
- UNLOCK(&client->interface->lock);
-
- if (exit) {
-@@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) {
- (void)exit_check(client);
- return;
- }
-+
-+ } else {
-+ client->tcpattached = true;
- }
-
- /*
-@@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) {
- UNEXPECTED_ERROR(__FILE__, __LINE__,
- "isc_socket_accept() failed: %s",
- isc_result_totext(result));
-- if (client->tcpquota != NULL) {
-- isc_quota_detach(&client->tcpquota);
-+
-+ tcpquota_disconnect(client);
-+
-+ if (client->tcpactive) {
-+ LOCK(&client->interface->lock);
-+ client->interface->ntcpactive--;
-+ UNLOCK(&client->interface->lock);
-+ client->tcpactive = false;
- }
-+
- return;
- }
-
-@@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) {
- * once the connection is established.
- *
- * When the client object is shutting down after handling a TCP
-- * request (see exit_check()), it looks to see whether this value is
-- * non-zero. If so, that means another client has already called
-- * accept() and is waiting to establish the next connection, which
-- * means the first client is free to go inactive. Otherwise,
-- * the first client must come back and call accept() again; this
-- * guarantees there will always be at least one client listening
-- * for new TCP connections on each interface.
-+ * request (see exit_check()), if this value is at least one, that
-+ * means another client has called accept() and is waiting to
-+ * establish the next connection. That means the client may be
-+ * be free to become inactive; otherwise it may need to start
-+ * listening for connections itself to prevent the interface
-+ * going dead.
- */
- LOCK(&client->interface->lock);
- client->interface->ntcpaccepting++;
-@@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) {
- client->tcpsocket, client);
- } else {
- result = get_client(client->manager, client->interface,
-- client->dispatch, tcp);
-+ client->dispatch, client, tcp);
-+
-+ /*
-+ * The responsibility for listening for new requests is hereby
-+ * transferred to the new client. Therefore, the old client
-+ * should refrain from listening for any more requests.
-+ */
-+ client->mortal = true;
- }
- if (result != ISC_R_SUCCESS) {
- return (result);
- }
-
-- /*
-- * The responsibility for listening for new requests is hereby
-- * transferred to the new client. Therefore, the old client
-- * should refrain from listening for any more requests.
-- */
-- client->mortal = true;
--
- return (ISC_R_SUCCESS);
- }
-
-@@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
-
- static isc_result_t
- get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-- dns_dispatch_t *disp, bool tcp)
-+ dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
- {
- isc_result_t result = ISC_R_SUCCESS;
- isc_event_t *ev;
-@@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- client->dscp = ifp->dscp;
-
- if (tcp) {
-+ client->tcpattached = false;
-+ if (oldclient != NULL) {
-+ client->tcpattached = oldclient->tcpattached;
-+ }
-+
-+ LOCK(&client->interface->lock);
-+ client->interface->ntcpactive++;
-+ UNLOCK(&client->interface->lock);
-+ client->tcpactive = true;
-+
- client->attributes |= NS_CLIENTATTR_TCP;
- isc_socket_attach(ifp->tcpsocket,
- &client->tcplistener);
-@@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
- ns_interface_attach(ifp, &client->interface);
- client->newstate = client->state = NS_CLIENTSTATE_WORKING;
- INSIST(client->recursionquota == NULL);
-- client->tcpquota = &client->sctx->tcpquota;
-+ client->tcpquota = &ns_g_server->tcpquota;
-+ client->tcpattached = oldclient->tcpattached;
-
- client->dscp = ifp->dscp;
-
-@@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
- LOCK(&client->interface->lock);
- client->interface->ntcpactive++;
- UNLOCK(&client->interface->lock);
--
- client->tcpactive = true;
-
- INSIST(client->tcpmsg_valid == false);
-@@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
- MTRACE("createclients");
-
- for (disp = 0; disp < n; disp++) {
-- result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
-+ result = get_client(manager, ifp, ifp->udpdispatch[disp],
-+ NULL, tcp);
- if (result != ISC_R_SUCCESS)
- break;
- }
-diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
-index aeed9ccdda..e2c40acd28 100644
---- a/bin/named/include/named/client.h
-+++ b/bin/named/include/named/client.h
-@@ -9,8 +9,6 @@
- * information regarding copyright ownership.
- */
-
--/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */
--
- #ifndef NAMED_CLIENT_H
- #define NAMED_CLIENT_H 1
-
-@@ -136,6 +134,7 @@ struct ns_client {
- bool pipelined; /*%< TCP queries not in sequence */
- isc_refcount_t *pipeline_refs;
- isc_quota_t *tcpquota;
-+ bool tcpattached;
- isc_quota_t *recursionquota;
- ns_interface_t *interface;
-
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch b/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
deleted file mode 100644
index 987e75bc0e..0000000000
--- a/meta/recipes-connectivity/bind/bind/0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch
+++ /dev/null
@@ -1,911 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/c47ccf6]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From c47ccf630f147378568b33e8fdb7b754f228c346 Mon Sep 17 00:00:00 2001
-From: Evan Hunt <each@isc.org>
-Date: Fri, 5 Apr 2019 16:26:05 -0700
-Subject: [PATCH 5/6] refactor tcpquota and pipeline refs; allow special-case
- overrun in isc_quota
-
-- if the TCP quota has been exceeded but there are no clients listening
- for new connections on the interface, we can now force attachment to the
- quota using isc_quota_force(), instead of carrying on with the quota not
- attached.
-- the TCP client quota is now referenced via a reference-counted
- 'ns_tcpconn' object, one of which is created whenever a client begins
- listening for new connections, and attached to by members of that
- client's pipeline group. when the last reference to the tcpconn
- object is detached, it is freed and the TCP quota slot is released.
-- reduce code duplication by adding mark_tcp_active() function.
-- convert counters to atomic.
-
-(cherry picked from commit 7e8222378ca24f1302a0c1c638565050ab04681b)
-(cherry picked from commit 4939451275722bfda490ea86ca13e84f6bc71e46)
-(cherry picked from commit 13f7c918b8720d890408f678bd73c20e634539d9)
----
- bin/named/client.c | 444 +++++++++++--------------
- bin/named/include/named/client.h | 12 +-
- bin/named/include/named/interfacemgr.h | 6 +-
- bin/named/interfacemgr.c | 1 +
- lib/isc/include/isc/quota.h | 7 +
- lib/isc/quota.c | 33 +-
- lib/isc/win32/libisc.def.in | 1 +
- 7 files changed, 236 insertions(+), 268 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index 61e96dd28c..d826ab32bf 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -244,8 +244,7 @@ static void client_start(isc_task_t *task, isc_event_t *event);
- static void client_request(isc_task_t *task, isc_event_t *event);
- static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
- static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-- dns_dispatch_t *disp, ns_client_t *oldclient,
-- bool tcp);
-+ dns_dispatch_t *disp, bool tcp);
- static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
- isc_socket_t *sock, ns_client_t *oldclient);
- static inline bool
-@@ -301,16 +300,32 @@ ns_client_settimeout(ns_client_t *client, unsigned int seconds) {
- }
-
- /*%
-- * Allocate a reference counter that will track the number of client structures
-- * using the TCP connection that 'client' called accept() for. This counter
-- * will be shared between all client structures associated with this TCP
-- * connection.
-+ * Allocate a reference-counted object that will maintain a single pointer to
-+ * the (also reference-counted) TCP client quota, shared between all the
-+ * clients processing queries on a single TCP connection, so that all
-+ * clients sharing the one socket will together consume only one slot in
-+ * the 'tcp-clients' quota.
- */
--static void
--pipeline_init(ns_client_t *client) {
-- isc_refcount_t *refs;
-+static isc_result_t
-+tcpconn_init(ns_client_t *client, bool force) {
-+ isc_result_t result;
-+ isc_quota_t *quota = NULL;
-+ ns_tcpconn_t *tconn = NULL;
-
-- REQUIRE(client->pipeline_refs == NULL);
-+ REQUIRE(client->tcpconn == NULL);
-+
-+ /*
-+ * Try to attach to the quota first, so we won't pointlessly
-+ * allocate memory for a tcpconn object if we can't get one.
-+ */
-+ if (force) {
-+ result = isc_quota_force(&ns_g_server->tcpquota, &quota);
-+ } else {
-+ result = isc_quota_attach(&ns_g_server->tcpquota, &quota);
-+ }
-+ if (result != ISC_R_SUCCESS) {
-+ return (result);
-+ }
-
- /*
- * A global memory context is used for the allocation as different
-@@ -320,78 +335,80 @@ pipeline_init(ns_client_t *client) {
- * contention here is expected to be negligible, given that this code
- * is only executed for TCP connections.
- */
-- refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
-- isc_refcount_init(refs, 1);
-- client->pipeline_refs = refs;
-+ tconn = isc_mem_allocate(ns_g_mctx, sizeof(*tconn));
-+
-+ isc_refcount_init(&tconn->refs, 1);
-+ tconn->tcpquota = quota;
-+ quota = NULL;
-+ tconn->pipelined = false;
-+
-+ client->tcpconn = tconn;
-+
-+ return (ISC_R_SUCCESS);
- }
-
- /*%
-- * Increase the count of client structures using the TCP connection that
-- * 'source' is associated with and put a pointer to that count in 'target',
-- * thus associating it with the same TCP connection.
-+ * Increase the count of client structures sharing the TCP connection
-+ * that 'source' is associated with; add a pointer to the same tcpconn
-+ * to 'target', thus associating it with the same TCP connection.
- */
- static void
--pipeline_attach(ns_client_t *source, ns_client_t *target) {
-+tcpconn_attach(ns_client_t *source, ns_client_t *target) {
- int refs;
-
-- REQUIRE(source->pipeline_refs != NULL);
-- REQUIRE(target->pipeline_refs == NULL);
-+ REQUIRE(source->tcpconn != NULL);
-+ REQUIRE(target->tcpconn == NULL);
-+ REQUIRE(source->tcpconn->pipelined);
-
-- isc_refcount_increment(source->pipeline_refs, &refs);
-+ isc_refcount_increment(&source->tcpconn->refs, &refs);
- INSIST(refs > 1);
-- target->pipeline_refs = source->pipeline_refs;
-+ target->tcpconn = source->tcpconn;
- }
-
- /*%
-- * Decrease the count of client structures using the TCP connection that
-+ * Decrease the count of client structures sharing the TCP connection that
- * 'client' is associated with. If this is the last client using this TCP
-- * connection, free the reference counter and return true; otherwise, return
-- * false.
-+ * connection, we detach from the TCP quota and free the tcpconn
-+ * object. Either way, client->tcpconn is set to NULL.
- */
--static bool
--pipeline_detach(ns_client_t *client) {
-- isc_refcount_t *refcount;
-+static void
-+tcpconn_detach(ns_client_t *client) {
-+ ns_tcpconn_t *tconn = NULL;
- int refs;
-
-- REQUIRE(client->pipeline_refs != NULL);
--
-- refcount = client->pipeline_refs;
-- client->pipeline_refs = NULL;
-+ REQUIRE(client->tcpconn != NULL);
-
-- isc_refcount_decrement(refcount, refs);
-+ tconn = client->tcpconn;
-+ client->tcpconn = NULL;
-
-+ isc_refcount_decrement(&tconn->refs, &refs);
- if (refs == 0) {
-- isc_mem_free(ns_g_mctx, refs);
-- return (true);
-+ isc_quota_detach(&tconn->tcpquota);
-+ isc_mem_free(ns_g_mctx, tconn);
- }
--
-- return (false);
- }
-
--/*
-- * Detach a client from the TCP client quota if appropriate, and set
-- * the quota pointer to NULL.
-- *
-- * Sometimes when the TCP client quota is exhausted but there are no other
-- * clients servicing the interface, a client will be allowed to continue
-- * running despite not having been attached to the quota. In this event,
-- * the TCP quota was never attached to the client, so when the client (or
-- * associated pipeline group) shuts down, the quota must NOT be detached.
-+/*%
-+ * Mark a client as active and increment the interface's 'ntcpactive'
-+ * counter, as a signal that there is at least one client servicing
-+ * TCP queries for the interface. If we reach the TCP client quota at
-+ * some point, this will be used to determine whether a quota overrun
-+ * should be permitted.
- *
-- * Otherwise, if the quota pointer is set, it should be detached. If not
-- * set at all, we just return without doing anything.
-+ * Marking the client active with the 'tcpactive' flag ensures proper
-+ * accounting, by preventing us from incrementing or decrementing
-+ * 'ntcpactive' more than once per client.
- */
- static void
--tcpquota_disconnect(ns_client_t *client) {
-- if (client->tcpquota == NULL) {
-- return;
-- }
--
-- if (client->tcpattached) {
-- isc_quota_detach(&client->tcpquota);
-- client->tcpattached = false;
-- } else {
-- client->tcpquota = NULL;
-+mark_tcp_active(ns_client_t *client, bool active) {
-+ if (active && !client->tcpactive) {
-+ isc_atomic_xadd(&client->interface->ntcpactive, 1);
-+ client->tcpactive = active;
-+ } else if (!active && client->tcpactive) {
-+ uint32_t old =
-+ isc_atomic_xadd(&client->interface->ntcpactive, -1);
-+ INSIST(old > 0);
-+ client->tcpactive = active;
- }
- }
-
-@@ -484,7 +501,8 @@ exit_check(ns_client_t *client) {
- INSIST(client->recursionquota == NULL);
-
- if (NS_CLIENTSTATE_READING == client->newstate) {
-- if (!client->pipelined) {
-+ INSIST(client->tcpconn != NULL);
-+ if (!client->tcpconn->pipelined) {
- client_read(client);
- client->newstate = NS_CLIENTSTATE_MAX;
- return (true); /* We're done. */
-@@ -507,8 +525,8 @@ exit_check(ns_client_t *client) {
- dns_tcpmsg_cancelread(&client->tcpmsg);
- }
-
-- if (client->nreads != 0) {
-- /* Still waiting for read cancel completion. */
-+ /* Still waiting for read cancel completion. */
-+ if (client->nreads > 0) {
- return (true);
- }
-
-@@ -518,43 +536,45 @@ exit_check(ns_client_t *client) {
- }
-
- /*
-- * Detach from pipeline group and from TCP client quota,
-- * if appropriate.
-+ * Soon the client will be ready to accept a new TCP
-+ * connection or UDP request, but we may have enough
-+ * clients doing that already. Check whether this client
-+ * needs to remain active and allow it go inactive if
-+ * not.
- *
-- * - If no pipeline group is active, attempt to
-- * detach from the TCP client quota.
-+ * UDP clients always go inactive at this point, but a TCP
-+ * client may need to stay active and return to READY
-+ * state if no other clients are available to listen
-+ * for TCP requests on this interface.
- *
-- * - If a pipeline group is active, detach from it;
-- * if the return code indicates that there no more
-- * clients left if this pipeline group, we also detach
-- * from the TCP client quota.
-- *
-- * - Otherwise we don't try to detach, we just set the
-- * TCP quota pointer to NULL if it wasn't NULL already.
-- *
-- * tcpquota_disconnect() will set tcpquota to NULL, either
-- * by detaching it or by assignment, depending on the
-- * needs of the client. See the comments on that function
-- * for further information.
-+ * Regardless, if we're going to FREED state, that means
-+ * the system is shutting down and we don't need to
-+ * retain clients.
- */
-- if (client->pipeline_refs == NULL || pipeline_detach(client)) {
-- tcpquota_disconnect(client);
-- } else {
-- client->tcpquota = NULL;
-- client->tcpattached = false;
-+ if (client->mortal && TCP_CLIENT(client) &&
-+ client->newstate != NS_CLIENTSTATE_FREED &&
-+ !ns_g_clienttest &&
-+ isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
-+ {
-+ /* Nobody else is accepting */
-+ client->mortal = false;
-+ client->newstate = NS_CLIENTSTATE_READY;
-+ }
-+
-+ /*
-+ * Detach from TCP connection and TCP client quota,
-+ * if appropriate. If this is the last reference to
-+ * the TCP connection in our pipeline group, the
-+ * TCP quota slot will be released.
-+ */
-+ if (client->tcpconn) {
-+ tcpconn_detach(client);
- }
-
- if (client->tcpsocket != NULL) {
- CTRACE("closetcp");
- isc_socket_detach(&client->tcpsocket);
--
-- if (client->tcpactive) {
-- LOCK(&client->interface->lock);
-- INSIST(client->interface->ntcpactive > 0);
-- client->interface->ntcpactive--;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = false;
-- }
-+ mark_tcp_active(client, false);
- }
-
- if (client->timerset) {
-@@ -567,35 +587,6 @@ exit_check(ns_client_t *client) {
- client->peeraddr_valid = false;
-
- client->state = NS_CLIENTSTATE_READY;
-- INSIST(client->recursionquota == NULL);
--
-- /*
-- * Now the client is ready to accept a new TCP connection
-- * or UDP request, but we may have enough clients doing
-- * that already. Check whether this client needs to remain
-- * active and force it to go inactive if not.
-- *
-- * UDP clients go inactive at this point, but a TCP client
-- * may need to remain active and go into ready state if
-- * no other clients are available to listen for TCP
-- * requests on this interface or (in the case of pipelined
-- * clients) to read for additional messages on the current
-- * connection.
-- */
-- if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
-- LOCK(&client->interface->lock);
-- if ((client->interface->ntcpaccepting == 0 ||
-- (client->pipelined &&
-- client->interface->ntcpactive < 2)) &&
-- client->newstate != NS_CLIENTSTATE_FREED)
-- {
-- client->mortal = false;
-- client->newstate = NS_CLIENTSTATE_READY;
-- }
-- UNLOCK(&client->interface->lock);
-- }
--
-- client->pipelined = false;
-
- /*
- * We don't need the client; send it to the inactive
-@@ -630,7 +621,7 @@ exit_check(ns_client_t *client) {
- }
-
- /* Still waiting for accept cancel completion. */
-- if (! (client->naccepts == 0)) {
-+ if (client->naccepts > 0) {
- return (true);
- }
-
-@@ -641,7 +632,7 @@ exit_check(ns_client_t *client) {
- }
-
- /* Still waiting for recv cancel completion. */
-- if (! (client->nrecvs == 0)) {
-+ if (client->nrecvs > 0) {
- return (true);
- }
-
-@@ -654,14 +645,7 @@ exit_check(ns_client_t *client) {
- INSIST(client->recursionquota == NULL);
- if (client->tcplistener != NULL) {
- isc_socket_detach(&client->tcplistener);
--
-- if (client->tcpactive) {
-- LOCK(&client->interface->lock);
-- INSIST(client->interface->ntcpactive > 0);
-- client->interface->ntcpactive--;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = false;
-- }
-+ mark_tcp_active(client, false);
- }
- if (client->udpsocket != NULL) {
- isc_socket_detach(&client->udpsocket);
-@@ -816,7 +800,7 @@ client_start(isc_task_t *task, isc_event_t *event) {
- return;
-
- if (TCP_CLIENT(client)) {
-- if (client->pipelined) {
-+ if (client->tcpconn != NULL) {
- client_read(client);
- } else {
- client_accept(client);
-@@ -2470,6 +2454,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
- client->nrecvs--;
- } else {
- INSIST(TCP_CLIENT(client));
-+ INSIST(client->tcpconn != NULL);
- REQUIRE(event->ev_type == DNS_EVENT_TCPMSG);
- REQUIRE(event->ev_sender == &client->tcpmsg);
- buffer = &client->tcpmsg.buffer;
-@@ -2657,17 +2642,19 @@ client_request(isc_task_t *task, isc_event_t *event) {
- /*
- * Pipeline TCP query processing.
- */
-- if (client->message->opcode != dns_opcode_query) {
-- client->pipelined = false;
-+ if (TCP_CLIENT(client) &&
-+ client->message->opcode != dns_opcode_query)
-+ {
-+ client->tcpconn->pipelined = false;
- }
-- if (TCP_CLIENT(client) && client->pipelined) {
-+ if (TCP_CLIENT(client) && client->tcpconn->pipelined) {
- /*
- * We're pipelining. Replace the client; the
-- * the replacement can read the TCP socket looking
-- * for new messages and this client can process the
-+ * replacement can read the TCP socket looking
-+ * for new messages and this one can process the
- * current message asynchronously.
- *
-- * There are now at least three clients using this
-+ * There will now be at least three clients using this
- * TCP socket - one accepting new connections,
- * one reading an existing connection to get new
- * messages, and one answering the message already
-@@ -2675,7 +2662,7 @@ client_request(isc_task_t *task, isc_event_t *event) {
- */
- result = ns_client_replace(client);
- if (result != ISC_R_SUCCESS) {
-- client->pipelined = false;
-+ client->tcpconn->pipelined = false;
- }
- }
-
-@@ -3233,10 +3220,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
- client->signer = NULL;
- dns_name_init(&client->signername, NULL);
- client->mortal = false;
-- client->pipelined = false;
-- client->pipeline_refs = NULL;
-- client->tcpquota = NULL;
-- client->tcpattached = false;
-+ client->tcpconn = NULL;
- client->recursionquota = NULL;
- client->interface = NULL;
- client->peeraddr_valid = false;
-@@ -3341,9 +3325,10 @@ client_read(ns_client_t *client) {
-
- static void
- client_newconn(isc_task_t *task, isc_event_t *event) {
-+ isc_result_t result;
- ns_client_t *client = event->ev_arg;
- isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
-- isc_result_t result;
-+ uint32_t old;
-
- REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
- REQUIRE(NS_CLIENT_VALID(client));
-@@ -3363,10 +3348,8 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- INSIST(client->naccepts == 1);
- client->naccepts--;
-
-- LOCK(&client->interface->lock);
-- INSIST(client->interface->ntcpaccepting > 0);
-- client->interface->ntcpaccepting--;
-- UNLOCK(&client->interface->lock);
-+ old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
-+ INSIST(old > 0);
-
- /*
- * We must take ownership of the new socket before the exit
-@@ -3399,7 +3382,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
- "accept failed: %s",
- isc_result_totext(nevent->result));
-- tcpquota_disconnect(client);
-+ tcpconn_detach(client);
- }
-
- if (exit_check(client))
-@@ -3437,15 +3420,13 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- * telnetting to port 53 (once per CPU) will
- * deny service to legitimate TCP clients.
- */
-- client->pipelined = false;
- result = ns_client_replace(client);
- if (result == ISC_R_SUCCESS &&
- (ns_g_server->keepresporder == NULL ||
- !allowed(&netaddr, NULL, NULL, 0, NULL,
- ns_g_server->keepresporder)))
- {
-- pipeline_init(client);
-- client->pipelined = true;
-+ client->tcpconn->pipelined = true;
- }
-
- client_read(client);
-@@ -3462,78 +3443,59 @@ client_accept(ns_client_t *client) {
- CTRACE("accept");
-
- /*
-- * The tcpquota object can only be simultaneously referenced a
-- * pre-defined number of times; this is configured by 'tcp-clients'
-- * in named.conf. If we can't attach to it here, that means the TCP
-- * client quota has been exceeded.
-+ * Set up a new TCP connection. This means try to attach to the
-+ * TCP client quota (tcp-clients), but fail if we're over quota.
- */
-- result = isc_quota_attach(&ns_g_server->tcpquota,
-- &client->tcpquota);
-+ result = tcpconn_init(client, false);
- if (result != ISC_R_SUCCESS) {
-- bool exit;
-+ bool exit;
-
-- ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-- NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1),
-- "no more TCP clients: %s",
-- isc_result_totext(result));
--
-- /*
-- * We have exceeded the system-wide TCP client
-- * quota. But, we can't just block this accept
-- * in all cases, because if we did, a heavy TCP
-- * load on other interfaces might cause this
-- * interface to be starved, with no clients able
-- * to accept new connections.
-- *
-- * So, we check here to see if any other clients
-- * are already servicing TCP queries on this
-- * interface (whether accepting, reading, or
-- * processing). If there are at least two
-- * (one reading and one processing a request)
-- * then it's okay *not* to call accept - we
-- * can let this client go inactive and another
-- * one will resume accepting when it's done.
-- *
-- * If there aren't enough active clients on the
-- * interface, then we can be a little bit
-- * flexible about the quota. We'll allow *one*
-- * extra client through to ensure we're listening
-- * on every interface.
-- *
-- * (Note: In practice this means that the real
-- * TCP client quota is tcp-clients plus the
-- * number of listening interfaces plus 2.)
-- */
-- LOCK(&client->interface->lock);
-- exit = (client->interface->ntcpactive > 1);
-- UNLOCK(&client->interface->lock);
-+ ns_client_log(client, NS_LOGCATEGORY_CLIENT,
-+ NS_LOGMODULE_CLIENT, ISC_LOG_WARNING,
-+ "TCP client quota reached: %s",
-+ isc_result_totext(result));
-
-- if (exit) {
-- client->newstate = NS_CLIENTSTATE_INACTIVE;
-- (void)exit_check(client);
-- return;
-- }
-+ /*
-+ * We have exceeded the system-wide TCP client quota. But,
-+ * we can't just block this accept in all cases, because if
-+ * we did, a heavy TCP load on other interfaces might cause
-+ * this interface to be starved, with no clients able to
-+ * accept new connections.
-+ *
-+ * So, we check here to see if any other clients are
-+ * already servicing TCP queries on this interface (whether
-+ * accepting, reading, or processing). If we find at least
-+ * one, then it's okay *not* to call accept - we can let this
-+ * client go inactive and another will take over when it's
-+ * done.
-+ *
-+ * If there aren't enough active clients on the interface,
-+ * then we can be a little bit flexible about the quota.
-+ * We'll allow *one* extra client through to ensure we're
-+ * listening on every interface; we do this by setting the
-+ * 'force' option to tcpconn_init().
-+ *
-+ * (Note: In practice this means that the real TCP client
-+ * quota is tcp-clients plus the number of listening
-+ * interfaces plus 1.)
-+ */
-+ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
-+ if (exit) {
-+ client->newstate = NS_CLIENTSTATE_INACTIVE;
-+ (void)exit_check(client);
-+ return;
-+ }
-
-- } else {
-- client->tcpattached = true;
-+ result = tcpconn_init(client, true);
-+ RUNTIME_CHECK(result == ISC_R_SUCCESS);
- }
-
- /*
-- * By incrementing the interface's ntcpactive counter we signal
-- * that there is at least one client servicing TCP queries for the
-- * interface.
-- *
-- * We also make note of the fact in the client itself with the
-- * tcpactive flag. This ensures proper accounting by preventing
-- * us from accidentally incrementing or decrementing ntcpactive
-- * more than once per client object.
-+ * If this client was set up using get_client() or get_worker(),
-+ * then TCP is already marked active. However, if it was restarted
-+ * from exit_check(), it might not be, so we take care of it now.
- */
-- if (!client->tcpactive) {
-- LOCK(&client->interface->lock);
-- client->interface->ntcpactive++;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = true;
-- }
-+ mark_tcp_active(client, true);
-
- result = isc_socket_accept(client->tcplistener, client->task,
- client_newconn, client);
-@@ -3549,15 +3511,8 @@ client_accept(ns_client_t *client) {
- "isc_socket_accept() failed: %s",
- isc_result_totext(result));
-
-- tcpquota_disconnect(client);
--
-- if (client->tcpactive) {
-- LOCK(&client->interface->lock);
-- client->interface->ntcpactive--;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = false;
-- }
--
-+ tcpconn_detach(client);
-+ mark_tcp_active(client, false);
- return;
- }
-
-@@ -3582,9 +3537,7 @@ client_accept(ns_client_t *client) {
- * listening for connections itself to prevent the interface
- * going dead.
- */
-- LOCK(&client->interface->lock);
-- client->interface->ntcpaccepting++;
-- UNLOCK(&client->interface->lock);
-+ isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
- }
-
- static void
-@@ -3655,24 +3608,25 @@ ns_client_replace(ns_client_t *client) {
- REQUIRE(client->manager != NULL);
-
- tcp = TCP_CLIENT(client);
-- if (tcp && client->pipelined) {
-+ if (tcp && client->tcpconn != NULL && client->tcpconn->pipelined) {
- result = get_worker(client->manager, client->interface,
- client->tcpsocket, client);
- } else {
- result = get_client(client->manager, client->interface,
-- client->dispatch, client, tcp);
-+ client->dispatch, tcp);
-
-- /*
-- * The responsibility for listening for new requests is hereby
-- * transferred to the new client. Therefore, the old client
-- * should refrain from listening for any more requests.
-- */
-- client->mortal = true;
- }
- if (result != ISC_R_SUCCESS) {
- return (result);
- }
-
-+ /*
-+ * The responsibility for listening for new requests is hereby
-+ * transferred to the new client. Therefore, the old client
-+ * should refrain from listening for any more requests.
-+ */
-+ client->mortal = true;
-+
- return (ISC_R_SUCCESS);
- }
-
-@@ -3806,7 +3760,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
-
- static isc_result_t
- get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
-- dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
-+ dns_dispatch_t *disp, bool tcp)
- {
- isc_result_t result = ISC_R_SUCCESS;
- isc_event_t *ev;
-@@ -3850,15 +3804,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- client->dscp = ifp->dscp;
-
- if (tcp) {
-- client->tcpattached = false;
-- if (oldclient != NULL) {
-- client->tcpattached = oldclient->tcpattached;
-- }
--
-- LOCK(&client->interface->lock);
-- client->interface->ntcpactive++;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = true;
-+ mark_tcp_active(client, true);
-
- client->attributes |= NS_CLIENTATTR_TCP;
- isc_socket_attach(ifp->tcpsocket,
-@@ -3923,16 +3869,14 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
- ns_interface_attach(ifp, &client->interface);
- client->newstate = client->state = NS_CLIENTSTATE_WORKING;
- INSIST(client->recursionquota == NULL);
-- client->tcpquota = &ns_g_server->tcpquota;
-- client->tcpattached = oldclient->tcpattached;
-
- client->dscp = ifp->dscp;
-
- client->attributes |= NS_CLIENTATTR_TCP;
- client->mortal = true;
-
-- pipeline_attach(oldclient, client);
-- client->pipelined = true;
-+ tcpconn_attach(oldclient, client);
-+ mark_tcp_active(client, true);
-
- isc_socket_attach(ifp->tcpsocket, &client->tcplistener);
- isc_socket_attach(sock, &client->tcpsocket);
-@@ -3940,11 +3884,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
- (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr);
- client->peeraddr_valid = true;
-
-- LOCK(&client->interface->lock);
-- client->interface->ntcpactive++;
-- UNLOCK(&client->interface->lock);
-- client->tcpactive = true;
--
- INSIST(client->tcpmsg_valid == false);
- dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg);
- client->tcpmsg_valid = true;
-@@ -3970,8 +3909,7 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
- MTRACE("createclients");
-
- for (disp = 0; disp < n; disp++) {
-- result = get_client(manager, ifp, ifp->udpdispatch[disp],
-- NULL, tcp);
-+ result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
- if (result != ISC_R_SUCCESS)
- break;
- }
-diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
-index e2c40acd28..969ee4c08f 100644
---- a/bin/named/include/named/client.h
-+++ b/bin/named/include/named/client.h
-@@ -78,6 +78,13 @@
- *** Types
- ***/
-
-+/*% reference-counted TCP connection object */
-+typedef struct ns_tcpconn {
-+ isc_refcount_t refs;
-+ isc_quota_t *tcpquota;
-+ bool pipelined;
-+} ns_tcpconn_t;
-+
- /*% nameserver client structure */
- struct ns_client {
- unsigned int magic;
-@@ -131,10 +138,7 @@ struct ns_client {
- dns_name_t signername; /*%< [T]SIG key name */
- dns_name_t *signer; /*%< NULL if not valid sig */
- bool mortal; /*%< Die after handling request */
-- bool pipelined; /*%< TCP queries not in sequence */
-- isc_refcount_t *pipeline_refs;
-- isc_quota_t *tcpquota;
-- bool tcpattached;
-+ ns_tcpconn_t *tcpconn;
- isc_quota_t *recursionquota;
- ns_interface_t *interface;
-
-diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
-index 61b08826a6..3535ef22a8 100644
---- a/bin/named/include/named/interfacemgr.h
-+++ b/bin/named/include/named/interfacemgr.h
-@@ -9,8 +9,6 @@
- * information regarding copyright ownership.
- */
-
--/* $Id: interfacemgr.h,v 1.35 2011/07/28 23:47:58 tbox Exp $ */
--
- #ifndef NAMED_INTERFACEMGR_H
- #define NAMED_INTERFACEMGR_H 1
-
-@@ -77,11 +75,11 @@ struct ns_interface {
- /*%< UDP dispatchers. */
- isc_socket_t * tcpsocket; /*%< TCP socket. */
- isc_dscp_t dscp; /*%< "listen-on" DSCP value */
-- int ntcpaccepting; /*%< Number of clients
-+ int32_t ntcpaccepting; /*%< Number of clients
- ready to accept new
- TCP connections on this
- interface */
-- int ntcpactive; /*%< Number of clients
-+ int32_t ntcpactive; /*%< Number of clients
- servicing TCP queries
- (whether accepting or
- connected) */
-diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
-index 955096ef47..d9f6df5802 100644
---- a/bin/named/interfacemgr.c
-+++ b/bin/named/interfacemgr.c
-@@ -388,6 +388,7 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
- */
- ifp->ntcpaccepting = 0;
- ifp->ntcpactive = 0;
-+
- ifp->nudpdispatch = 0;
-
- ifp->dscp = -1;
-diff --git a/lib/isc/include/isc/quota.h b/lib/isc/include/isc/quota.h
-index b9bf59877a..36c5830242 100644
---- a/lib/isc/include/isc/quota.h
-+++ b/lib/isc/include/isc/quota.h
-@@ -100,6 +100,13 @@ isc_quota_attach(isc_quota_t *quota, isc_quota_t **p);
- * quota if successful (ISC_R_SUCCESS or ISC_R_SOFTQUOTA).
- */
-
-+isc_result_t
-+isc_quota_force(isc_quota_t *quota, isc_quota_t **p);
-+/*%<
-+ * Like isc_quota_attach, but will attach '*p' to the quota
-+ * even if the hard quota has been exceeded.
-+ */
-+
- void
- isc_quota_detach(isc_quota_t **p);
- /*%<
-diff --git a/lib/isc/quota.c b/lib/isc/quota.c
-index 3ddff0d875..556a61f21d 100644
---- a/lib/isc/quota.c
-+++ b/lib/isc/quota.c
-@@ -74,20 +74,39 @@ isc_quota_release(isc_quota_t *quota) {
- UNLOCK(&quota->lock);
- }
-
--isc_result_t
--isc_quota_attach(isc_quota_t *quota, isc_quota_t **p)
--{
-+static isc_result_t
-+doattach(isc_quota_t *quota, isc_quota_t **p, bool force) {
- isc_result_t result;
-- INSIST(p != NULL && *p == NULL);
-+ REQUIRE(p != NULL && *p == NULL);
-+
- result = isc_quota_reserve(quota);
-- if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA)
-+ if (result == ISC_R_SUCCESS || result == ISC_R_SOFTQUOTA) {
-+ *p = quota;
-+ } else if (result == ISC_R_QUOTA && force) {
-+ /* attach anyway */
-+ LOCK(&quota->lock);
-+ quota->used++;
-+ UNLOCK(&quota->lock);
-+
- *p = quota;
-+ result = ISC_R_SUCCESS;
-+ }
-+
- return (result);
- }
-
-+isc_result_t
-+isc_quota_attach(isc_quota_t *quota, isc_quota_t **p) {
-+ return (doattach(quota, p, false));
-+}
-+
-+isc_result_t
-+isc_quota_force(isc_quota_t *quota, isc_quota_t **p) {
-+ return (doattach(quota, p, true));
-+}
-+
- void
--isc_quota_detach(isc_quota_t **p)
--{
-+isc_quota_detach(isc_quota_t **p) {
- INSIST(p != NULL && *p != NULL);
- isc_quota_release(*p);
- *p = NULL;
-diff --git a/lib/isc/win32/libisc.def.in b/lib/isc/win32/libisc.def.in
-index a82facec0f..7b9f23d776 100644
---- a/lib/isc/win32/libisc.def.in
-+++ b/lib/isc/win32/libisc.def.in
-@@ -519,6 +519,7 @@ isc_portset_removerange
- isc_quota_attach
- isc_quota_destroy
- isc_quota_detach
-+isc_quota_force
- isc_quota_init
- isc_quota_max
- isc_quota_release
---
-2.20.1
-
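The 0005 patch removed above replaces the tcpattached bookkeeping with a reference-counted ns_tcpconn object and adds isc_quota_force(), letting a listener take one slot beyond the tcp-clients limit when nothing else is servicing the interface. A self-contained, single-threaded sketch of that accept-path policy follows; the types and names are invented for illustration, and the real code is protected by locks and atomic counters:

    #include <assert.h>
    #include <stdbool.h>

    /* Simplified model of a hard quota that can be deliberately overrun. */
    typedef struct quota {
        unsigned int max;
        unsigned int used;
    } quota_t;

    static bool quota_attach(quota_t *q) {
        if (q->used >= q->max)
            return false;            /* over quota: caller decides what to do */
        q->used++;
        return true;
    }

    static void quota_force(quota_t *q) {
        q->used++;                   /* attach anyway, may exceed 'max' */
    }

    static void quota_detach(quota_t *q) {
        assert(q->used > 0);
        q->used--;
    }

    /* Accept-path policy from the patch: if the quota is full but nobody
     * else is servicing TCP on this interface, take one extra slot so the
     * interface never goes deaf. */
    static bool tcp_accept_slot(quota_t *q, unsigned int ntcpactive_others) {
        if (quota_attach(q))
            return true;
        if (ntcpactive_others > 0)
            return false;            /* let an active client resume accepting */
        quota_force(q);              /* special-case overrun: one extra client */
        return true;
    }

    int main(void) {
        quota_t tcp_clients = { .max = 1, .used = 1 };  /* quota already full */
        bool ok = tcp_accept_slot(&tcp_clients, 0);     /* nobody else active */
        assert(ok);                                     /* forced overrun */
        ok = tcp_accept_slot(&tcp_clients, 2);          /* others are active */
        assert(!ok);                                    /* stay within quota */
        quota_detach(&tcp_clients);
        quota_detach(&tcp_clients);
        return 0;
    }

In the real change the counters are kept per interface and updated atomically, which is exactly what the 0006 and 0007 patches below go on to adjust.
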
diff --git a/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch b/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
deleted file mode 100644
index 3821d18501..0000000000
--- a/meta/recipes-connectivity/bind/bind/0006-restore-allowance-for-tcp-clients-interfaces.patch
+++ /dev/null
@@ -1,80 +0,0 @@
-Backport patch to fix CVE-2018-5743.
-
-Ref:
-https://security-tracker.debian.org/tracker/CVE-2018-5743
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/59434b9]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From 59434b987e8eb436b08c24e559ee094c4e939daa Mon Sep 17 00:00:00 2001
-From: Evan Hunt <each@isc.org>
-Date: Fri, 5 Apr 2019 16:26:19 -0700
-Subject: [PATCH 6/6] restore allowance for tcp-clients < interfaces
-
-in the "refactor tcpquota and pipeline refs" commit, the counting
-of active interfaces was tightened in such a way that named could
-fail to listen on an interface if there were more interfaces than
-tcp-clients. when checking the quota to start accepting on an
-interface, if the number of active clients was above zero, then
-it was presumed that some other client was able to handle accepting
-new connections. this, however, ignored the fact that the current client
-could be included in that count, so if the quota was already exceeded
-before all the interfaces were listening, some interfaces would never
-listen.
-
-we now check whether the current client has been marked active; if so,
-then the number of active clients on the interface must be greater
-than 1, not 0.
-
-(cherry picked from commit 0b4e2cd4c3192ba88569dd344f542a8cc43742b5)
-(cherry picked from commit d01023aaac35543daffbdf48464e320150235d41)
----
- bin/named/client.c | 8 +++++---
- doc/arm/Bv9ARM-book.xml | 3 ++-
- 2 files changed, 7 insertions(+), 4 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index d826ab32bf..845326abc0 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -3464,8 +3464,9 @@ client_accept(ns_client_t *client) {
- *
- * So, we check here to see if any other clients are
- * already servicing TCP queries on this interface (whether
-- * accepting, reading, or processing). If we find at least
-- * one, then it's okay *not* to call accept - we can let this
-+ * accepting, reading, or processing). If we find that at
-+ * least one client other than this one is active, then
-+ * it's okay *not* to call accept - we can let this
- * client go inactive and another will take over when it's
- * done.
- *
-@@ -3479,7 +3480,8 @@ client_accept(ns_client_t *client) {
- * quota is tcp-clients plus the number of listening
- * interfaces plus 1.)
- */
-- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) > 0);
-+ exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
-+ (client->tcpactive ? 1 : 0));
- if (exit) {
- client->newstate = NS_CLIENTSTATE_INACTIVE;
- (void)exit_check(client);
-diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml
-index 381768d540..9c76d3cd6f 100644
---- a/doc/arm/Bv9ARM-book.xml
-+++ b/doc/arm/Bv9ARM-book.xml
-@@ -8493,7 +8493,8 @@ avoid-v6-udp-ports { 40000; range 50000 60000; };
- <para>
- The number of file descriptors reserved for TCP, stdio,
- etc. This needs to be big enough to cover the number of
-- interfaces <command>named</command> listens on, <command>tcp-clients</command> as well as
-+ interfaces <command>named</command> listens on plus
-+ <command>tcp-clients</command>, as well as
- to provide room for outgoing TCP queries and incoming zone
- transfers. The default is <literal>512</literal>.
- The minimum value is <literal>128</literal> and the
---
-2.20.1
-
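The 0006 patch removed above is essentially a self-exclusion fix: when deciding whether a client may stop listening, the interface's active-client count has to be compared against 1 rather than 0 whenever the current client is itself included in that count, otherwise interfaces beyond the tcp-clients limit never start listening. A minimal model of that check (illustrative names only, not the BIND code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Decide whether this client may go inactive when the TCP quota is
     * exhausted.  The interface-wide counter includes the current client
     * once it has been marked active, so it must be discounted before
     * concluding that "someone else" is still listening. */
    static bool may_go_inactive(int32_t ntcpactive, bool self_active) {
        return ntcpactive > (self_active ? 1 : 0);
    }

    int main(void) {
        /* Only this client is active: it must keep listening itself. */
        assert(!may_go_inactive(1, true));
        /* Another client is active too: safe to go inactive. */
        assert(may_go_inactive(2, true));
        /* This client never got marked active; any other activity suffices. */
        assert(may_go_inactive(1, false));
        return 0;
    }
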
diff --git a/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch b/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
deleted file mode 100644
index 1a84eca58a..0000000000
--- a/meta/recipes-connectivity/bind/bind/0007-Replace-atomic-operations-in-bin-named-client.c-with.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-Backport commit to fix compile error on arm caused by commits which are
-to fix CVE-2018-5743.
-
-CVE: CVE-2018-5743
-Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/ef49780]
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-
-From ef49780d30d3ddc5735cfc32561b678a634fa72f Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@sury.org>
-Date: Wed, 17 Apr 2019 15:22:27 +0200
-Subject: [PATCH] Replace atomic operations in bin/named/client.c with
- isc_refcount reference counting
-
----
- bin/named/client.c | 18 +++++++-----------
- bin/named/include/named/interfacemgr.h | 5 +++--
- bin/named/interfacemgr.c | 7 +++++--
- 3 files changed, 15 insertions(+), 15 deletions(-)
-
-diff --git a/bin/named/client.c b/bin/named/client.c
-index 845326abc0..29fecadca8 100644
---- a/bin/named/client.c
-+++ b/bin/named/client.c
-@@ -402,12 +402,10 @@ tcpconn_detach(ns_client_t *client) {
- static void
- mark_tcp_active(ns_client_t *client, bool active) {
- if (active && !client->tcpactive) {
-- isc_atomic_xadd(&client->interface->ntcpactive, 1);
-+ isc_refcount_increment0(&client->interface->ntcpactive, NULL);
- client->tcpactive = active;
- } else if (!active && client->tcpactive) {
-- uint32_t old =
-- isc_atomic_xadd(&client->interface->ntcpactive, -1);
-- INSIST(old > 0);
-+ isc_refcount_decrement(&client->interface->ntcpactive, NULL);
- client->tcpactive = active;
- }
- }
-@@ -554,7 +552,7 @@ exit_check(ns_client_t *client) {
- if (client->mortal && TCP_CLIENT(client) &&
- client->newstate != NS_CLIENTSTATE_FREED &&
- !ns_g_clienttest &&
-- isc_atomic_xadd(&client->interface->ntcpaccepting, 0) == 0)
-+ isc_refcount_current(&client->interface->ntcpaccepting) == 0)
- {
- /* Nobody else is accepting */
- client->mortal = false;
-@@ -3328,7 +3326,6 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- isc_result_t result;
- ns_client_t *client = event->ev_arg;
- isc_socket_newconnev_t *nevent = (isc_socket_newconnev_t *)event;
-- uint32_t old;
-
- REQUIRE(event->ev_type == ISC_SOCKEVENT_NEWCONN);
- REQUIRE(NS_CLIENT_VALID(client));
-@@ -3348,8 +3345,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
- INSIST(client->naccepts == 1);
- client->naccepts--;
-
-- old = isc_atomic_xadd(&client->interface->ntcpaccepting, -1);
-- INSIST(old > 0);
-+ isc_refcount_decrement(&client->interface->ntcpaccepting, NULL);
-
- /*
- * We must take ownership of the new socket before the exit
-@@ -3480,8 +3476,8 @@ client_accept(ns_client_t *client) {
- * quota is tcp-clients plus the number of listening
- * interfaces plus 1.)
- */
-- exit = (isc_atomic_xadd(&client->interface->ntcpactive, 0) >
-- (client->tcpactive ? 1 : 0));
-+ exit = (isc_refcount_current(&client->interface->ntcpactive) >
-+ (client->tcpactive ? 1U : 0U));
- if (exit) {
- client->newstate = NS_CLIENTSTATE_INACTIVE;
- (void)exit_check(client);
-@@ -3539,7 +3535,7 @@ client_accept(ns_client_t *client) {
- * listening for connections itself to prevent the interface
- * going dead.
- */
-- isc_atomic_xadd(&client->interface->ntcpaccepting, 1);
-+ isc_refcount_increment0(&client->interface->ntcpaccepting, NULL);
- }
-
- static void
-diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h
-index 3535ef22a8..6e10f210fd 100644
---- a/bin/named/include/named/interfacemgr.h
-+++ b/bin/named/include/named/interfacemgr.h
-@@ -45,6 +45,7 @@
- #include <isc/magic.h>
- #include <isc/mem.h>
- #include <isc/socket.h>
-+#include <isc/refcount.h>
-
- #include <dns/result.h>
-
-@@ -75,11 +76,11 @@ struct ns_interface {
- /*%< UDP dispatchers. */
- isc_socket_t * tcpsocket; /*%< TCP socket. */
- isc_dscp_t dscp; /*%< "listen-on" DSCP value */
-- int32_t ntcpaccepting; /*%< Number of clients
-+ isc_refcount_t ntcpaccepting; /*%< Number of clients
- ready to accept new
- TCP connections on this
- interface */
-- int32_t ntcpactive; /*%< Number of clients
-+ isc_refcount_t ntcpactive; /*%< Number of clients
- servicing TCP queries
- (whether accepting or
- connected) */
-diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c
-index d9f6df5802..135533be6b 100644
---- a/bin/named/interfacemgr.c
-+++ b/bin/named/interfacemgr.c
-@@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr,
- * connections will be handled in parallel even though there is
- * only one client initially.
- */
-- ifp->ntcpaccepting = 0;
-- ifp->ntcpactive = 0;
-+ isc_refcount_init(&ifp->ntcpaccepting, 0);
-+ isc_refcount_init(&ifp->ntcpactive, 0);
-
- ifp->nudpdispatch = 0;
-
-@@ -618,6 +618,9 @@ ns_interface_destroy(ns_interface_t *ifp) {
-
- ns_interfacemgr_detach(&ifp->mgr);
-
-+ isc_refcount_destroy(&ifp->ntcpactive);
-+ isc_refcount_destroy(&ifp->ntcpaccepting);
-+
- ifp->magic = 0;
- isc_mem_put(mctx, ifp, sizeof(*ifp));
- }
---
-2.20.1
-
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2020-8622.patch b/meta/recipes-connectivity/bind/bind/CVE-2020-8622.patch
new file mode 100644
index 0000000000..dec5672657
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2020-8622.patch
@@ -0,0 +1,60 @@
+From ca543240380475d888d660ea3296fc880ce52f35 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Wed, 15 Jul 2020 16:07:51 +1000
+Subject: [PATCH] bind: Always keep a copy of the message
+
+this allows it to be available even when dns_message_parse()
+returns a error.
+
+Upstream-Status: Backport
+CVE: CVE-2020-8622
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ lib/dns/message.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/lib/dns/message.c b/lib/dns/message.c
+index ac637a2..39ed80f 100644
+--- a/lib/dns/message.c
++++ b/lib/dns/message.c
+@@ -1679,6 +1679,19 @@ dns_message_parse(dns_message_t *msg, isc_buffer_t *source,
+ msg->header_ok = 0;
+ msg->question_ok = 0;
+
++ if ((options & DNS_MESSAGEPARSE_CLONEBUFFER) == 0) {
++ isc_buffer_usedregion(&origsource, &msg->saved);
++ } else {
++ msg->saved.length = isc_buffer_usedlength(&origsource);
++ msg->saved.base = isc_mem_get(msg->mctx, msg->saved.length);
++ if (msg->saved.base == NULL) {
++ return (ISC_R_NOMEMORY);
++ }
++ memmove(msg->saved.base, isc_buffer_base(&origsource),
++ msg->saved.length);
++ msg->free_saved = 1;
++ }
++
+ isc_buffer_remainingregion(source, &r);
+ if (r.length < DNS_MESSAGE_HEADERLEN)
+ return (ISC_R_UNEXPECTEDEND);
+@@ -1754,17 +1767,6 @@ dns_message_parse(dns_message_t *msg, isc_buffer_t *source,
+ }
+
+ truncated:
+- if ((options & DNS_MESSAGEPARSE_CLONEBUFFER) == 0)
+- isc_buffer_usedregion(&origsource, &msg->saved);
+- else {
+- msg->saved.length = isc_buffer_usedlength(&origsource);
+- msg->saved.base = isc_mem_get(msg->mctx, msg->saved.length);
+- if (msg->saved.base == NULL)
+- return (ISC_R_NOMEMORY);
+- memmove(msg->saved.base, isc_buffer_base(&origsource),
+- msg->saved.length);
+- msg->free_saved = 1;
+- }
+
+ if (ret == ISC_R_UNEXPECTEDEND && ignore_tc)
+ return (DNS_R_RECOVERABLE);
+--
+1.9.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2020-8623.patch b/meta/recipes-connectivity/bind/bind/CVE-2020-8623.patch
new file mode 100644
index 0000000000..8e5412a89e
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2020-8623.patch
@@ -0,0 +1,402 @@
+From 8d807cc21655eaa6e6a08afafeec3682c0f3f2ab Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@isc.org>
+Date: Tue, 21 Jul 2020 14:42:47 +0200
+Subject: [PATCH] Fix crash in pk11_numbits() when native-pkcs11 is used
+
+When pk11_numbits() is passed user-provided input that contains all
+zeroes (via a crafted DNS message), it would crash with an assertion
+failure. Fix that by properly handling such input.
+
+Upstream-Status: Backport
+CVE: CVE-2020-8623
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ lib/dns/pkcs11dh_link.c | 15 ++++++-
+ lib/dns/pkcs11dsa_link.c | 8 +++-
+ lib/dns/pkcs11rsa_link.c | 79 +++++++++++++++++++++++++--------
+ lib/isc/include/pk11/internal.h | 3 +-
+ lib/isc/pk11.c | 61 ++++++++++++++++---------
+ 5 files changed, 121 insertions(+), 45 deletions(-)
+
+diff --git a/lib/dns/pkcs11dh_link.c b/lib/dns/pkcs11dh_link.c
+index e2b60ea7c5..4cd8e32d60 100644
+--- a/lib/dns/pkcs11dh_link.c
++++ b/lib/dns/pkcs11dh_link.c
+@@ -748,6 +748,7 @@ pkcs11dh_fromdns(dst_key_t *key, isc_buffer_t *data) {
+ CK_BYTE *prime = NULL, *base = NULL, *pub = NULL;
+ CK_ATTRIBUTE *attr;
+ int special = 0;
++ unsigned int bits;
+ isc_result_t result;
+
+ isc_buffer_remainingregion(data, &r);
+@@ -852,7 +853,11 @@ pkcs11dh_fromdns(dst_key_t *key, isc_buffer_t *data) {
+ pub = r.base;
+ isc_region_consume(&r, publen);
+
+- key->key_size = pk11_numbits(prime, plen_);
++ result = pk11_numbits(prime, plen_, &bits);
++ if (result != ISC_R_SUCCESS) {
++ goto cleanup;
++ }
++ key->key_size = bits;
+
+ dh->repr = (CK_ATTRIBUTE *) isc_mem_get(key->mctx, sizeof(*attr) * 3);
+ if (dh->repr == NULL)
+@@ -1012,6 +1017,7 @@ pkcs11dh_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+ dst_private_t priv;
+ isc_result_t ret;
+ int i;
++ unsigned int bits;
+ pk11_object_t *dh = NULL;
+ CK_ATTRIBUTE *attr;
+ isc_mem_t *mctx;
+@@ -1082,7 +1088,12 @@ pkcs11dh_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+
+ attr = pk11_attribute_bytype(dh, CKA_PRIME);
+ INSIST(attr != NULL);
+- key->key_size = pk11_numbits(attr->pValue, attr->ulValueLen);
++
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ return (ISC_R_SUCCESS);
+
+diff --git a/lib/dns/pkcs11dsa_link.c b/lib/dns/pkcs11dsa_link.c
+index 12d707a112..24d4c149ff 100644
+--- a/lib/dns/pkcs11dsa_link.c
++++ b/lib/dns/pkcs11dsa_link.c
+@@ -983,6 +983,7 @@ pkcs11dsa_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+ dst_private_t priv;
+ isc_result_t ret;
+ int i;
++ unsigned int bits;
+ pk11_object_t *dsa = NULL;
+ CK_ATTRIBUTE *attr;
+ isc_mem_t *mctx = key->mctx;
+@@ -1072,7 +1073,12 @@ pkcs11dsa_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+
+ attr = pk11_attribute_bytype(dsa, CKA_PRIME);
+ INSIST(attr != NULL);
+- key->key_size = pk11_numbits(attr->pValue, attr->ulValueLen);
++
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ return (ISC_R_SUCCESS);
+
+diff --git a/lib/dns/pkcs11rsa_link.c b/lib/dns/pkcs11rsa_link.c
+index 096c1a8e91..1d10d26564 100644
+--- a/lib/dns/pkcs11rsa_link.c
++++ b/lib/dns/pkcs11rsa_link.c
+@@ -332,6 +332,7 @@ pkcs11rsa_createctx_verify(dst_key_t *key, unsigned int maxbits,
+ key->key_alg == DST_ALG_RSASHA256 ||
+ key->key_alg == DST_ALG_RSASHA512);
+ #endif
++ REQUIRE(maxbits <= RSA_MAX_PUBEXP_BITS);
+
+ /*
+ * Reject incorrect RSA key lengths.
+@@ -376,6 +377,7 @@ pkcs11rsa_createctx_verify(dst_key_t *key, unsigned int maxbits,
+ for (attr = pk11_attribute_first(rsa);
+ attr != NULL;
+ attr = pk11_attribute_next(rsa, attr))
++ {
+ switch (attr->type) {
+ case CKA_MODULUS:
+ INSIST(keyTemplate[5].type == attr->type);
+@@ -396,12 +398,16 @@ pkcs11rsa_createctx_verify(dst_key_t *key, unsigned int maxbits,
+ memmove(keyTemplate[6].pValue, attr->pValue,
+ attr->ulValueLen);
+ keyTemplate[6].ulValueLen = attr->ulValueLen;
+- if (pk11_numbits(attr->pValue,
+- attr->ulValueLen) > maxbits &&
+- maxbits != 0)
++ unsigned int bits;
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen,
++ &bits);
++ if (ret != ISC_R_SUCCESS ||
++ (bits > maxbits && maxbits != 0)) {
+ DST_RET(DST_R_VERIFYFAILURE);
++ }
+ break;
+ }
++ }
+ pk11_ctx->object = CK_INVALID_HANDLE;
+ pk11_ctx->ontoken = false;
+ PK11_RET(pkcs_C_CreateObject,
+@@ -1072,6 +1078,7 @@ pkcs11rsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ keyTemplate[5].ulValueLen = attr->ulValueLen;
+ break;
+ case CKA_PUBLIC_EXPONENT:
++ unsigned int bits;
+ INSIST(keyTemplate[6].type == attr->type);
+ keyTemplate[6].pValue = isc_mem_get(dctx->mctx,
+ attr->ulValueLen);
+@@ -1080,10 +1087,12 @@ pkcs11rsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ memmove(keyTemplate[6].pValue, attr->pValue,
+ attr->ulValueLen);
+ keyTemplate[6].ulValueLen = attr->ulValueLen;
+- if (pk11_numbits(attr->pValue,
+- attr->ulValueLen)
+- > RSA_MAX_PUBEXP_BITS)
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen,
++ &bits);
++ if (ret != ISC_R_SUCCESS || bits > RSA_MAX_PUBEXP_BITS)
++ {
+ DST_RET(DST_R_VERIFYFAILURE);
++ }
+ break;
+ }
+ pk11_ctx->object = CK_INVALID_HANDLE;
+@@ -1461,6 +1470,8 @@ pkcs11rsa_fromdns(dst_key_t *key, isc_buffer_t *data) {
+ CK_BYTE *exponent = NULL, *modulus = NULL;
+ CK_ATTRIBUTE *attr;
+ unsigned int length;
++ unsigned int bits;
++ isc_result_t ret = ISC_R_SUCCESS;
+
+ isc_buffer_remainingregion(data, &r);
+ if (r.length == 0)
+@@ -1478,9 +1489,7 @@ pkcs11rsa_fromdns(dst_key_t *key, isc_buffer_t *data) {
+
+ if (e_bytes == 0) {
+ if (r.length < 2) {
+- isc_safe_memwipe(rsa, sizeof(*rsa));
+- isc_mem_put(key->mctx, rsa, sizeof(*rsa));
+- return (DST_R_INVALIDPUBLICKEY);
++ DST_RET(DST_R_INVALIDPUBLICKEY);
+ }
+ e_bytes = (*r.base) << 8;
+ isc_region_consume(&r, 1);
+@@ -1489,16 +1498,18 @@ pkcs11rsa_fromdns(dst_key_t *key, isc_buffer_t *data) {
+ }
+
+ if (r.length < e_bytes) {
+- isc_safe_memwipe(rsa, sizeof(*rsa));
+- isc_mem_put(key->mctx, rsa, sizeof(*rsa));
+- return (DST_R_INVALIDPUBLICKEY);
++ DST_RET(DST_R_INVALIDPUBLICKEY);
+ }
+ exponent = r.base;
+ isc_region_consume(&r, e_bytes);
+ modulus = r.base;
+ mod_bytes = r.length;
+
+- key->key_size = pk11_numbits(modulus, mod_bytes);
++ ret = pk11_numbits(modulus, mod_bytes, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ isc_buffer_forward(data, length);
+
+@@ -1548,9 +1559,12 @@ pkcs11rsa_fromdns(dst_key_t *key, isc_buffer_t *data) {
+ rsa->repr,
+ rsa->attrcnt * sizeof(*attr));
+ }
++ ret = ISC_R_NOMEMORY;
++
++ err:
+ isc_safe_memwipe(rsa, sizeof(*rsa));
+ isc_mem_put(key->mctx, rsa, sizeof(*rsa));
+- return (ISC_R_NOMEMORY);
++ return (ret);
+ }
+
+ static isc_result_t
+@@ -1729,6 +1743,7 @@ pkcs11rsa_fetch(dst_key_t *key, const char *engine, const char *label,
+ pk11_object_t *pubrsa;
+ pk11_context_t *pk11_ctx = NULL;
+ isc_result_t ret;
++ unsigned int bits;
+
+ if (label == NULL)
+ return (DST_R_NOENGINE);
+@@ -1815,7 +1830,11 @@ pkcs11rsa_fetch(dst_key_t *key, const char *engine, const char *label,
+
+ attr = pk11_attribute_bytype(rsa, CKA_MODULUS);
+ INSIST(attr != NULL);
+- key->key_size = pk11_numbits(attr->pValue, attr->ulValueLen);
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ return (ISC_R_SUCCESS);
+
+@@ -1901,6 +1920,7 @@ pkcs11rsa_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+ CK_ATTRIBUTE *attr;
+ isc_mem_t *mctx = key->mctx;
+ const char *engine = NULL, *label = NULL;
++ unsigned int bits;
+
+ /* read private key file */
+ ret = dst__privstruct_parse(key, DST_ALG_RSA, lexer, mctx, &priv);
+@@ -2044,12 +2064,22 @@ pkcs11rsa_parse(dst_key_t *key, isc_lex_t *lexer, dst_key_t *pub) {
+
+ attr = pk11_attribute_bytype(rsa, CKA_MODULUS);
+ INSIST(attr != NULL);
+- key->key_size = pk11_numbits(attr->pValue, attr->ulValueLen);
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ attr = pk11_attribute_bytype(rsa, CKA_PUBLIC_EXPONENT);
+ INSIST(attr != NULL);
+- if (pk11_numbits(attr->pValue, attr->ulValueLen) > RSA_MAX_PUBEXP_BITS)
++
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ if (bits > RSA_MAX_PUBEXP_BITS) {
+ DST_RET(ISC_R_RANGE);
++ }
+
+ dst__privstruct_free(&priv, mctx);
+ isc_safe_memwipe(&priv, sizeof(priv));
+@@ -2084,6 +2114,7 @@ pkcs11rsa_fromlabel(dst_key_t *key, const char *engine, const char *label,
+ pk11_context_t *pk11_ctx = NULL;
+ isc_result_t ret;
+ unsigned int i;
++ unsigned int bits;
+
+ UNUSED(pin);
+
+@@ -2178,12 +2209,22 @@ pkcs11rsa_fromlabel(dst_key_t *key, const char *engine, const char *label,
+
+ attr = pk11_attribute_bytype(rsa, CKA_PUBLIC_EXPONENT);
+ INSIST(attr != NULL);
+- if (pk11_numbits(attr->pValue, attr->ulValueLen) > RSA_MAX_PUBEXP_BITS)
++
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ if (bits > RSA_MAX_PUBEXP_BITS) {
+ DST_RET(ISC_R_RANGE);
++ }
+
+ attr = pk11_attribute_bytype(rsa, CKA_MODULUS);
+ INSIST(attr != NULL);
+- key->key_size = pk11_numbits(attr->pValue, attr->ulValueLen);
++ ret = pk11_numbits(attr->pValue, attr->ulValueLen, &bits);
++ if (ret != ISC_R_SUCCESS) {
++ goto err;
++ }
++ key->key_size = bits;
+
+ pk11_return_session(pk11_ctx);
+ isc_safe_memwipe(pk11_ctx, sizeof(*pk11_ctx));
+diff --git a/lib/isc/include/pk11/internal.h b/lib/isc/include/pk11/internal.h
+index aa8907ab08..7cc8ec812b 100644
+--- a/lib/isc/include/pk11/internal.h
++++ b/lib/isc/include/pk11/internal.h
+@@ -25,7 +25,8 @@ void pk11_mem_put(void *ptr, size_t size);
+
+ CK_SLOT_ID pk11_get_best_token(pk11_optype_t optype);
+
+-unsigned int pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt);
++isc_result_t
++pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt, unsigned int *bits);
+
+ CK_ATTRIBUTE *pk11_attribute_first(const pk11_object_t *obj);
+
+diff --git a/lib/isc/pk11.c b/lib/isc/pk11.c
+index 012afd968a..4e4052044b 100644
+--- a/lib/isc/pk11.c
++++ b/lib/isc/pk11.c
+@@ -962,13 +962,15 @@ pk11_get_best_token(pk11_optype_t optype) {
+ return (token->slotid);
+ }
+
+-unsigned int
+-pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt) {
++isc_result_t
++pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt, unsigned int *bits) {
+ unsigned int bitcnt, i;
+ CK_BYTE top;
+
+- if (bytecnt == 0)
+- return (0);
++ if (bytecnt == 0) {
++ *bits = 0;
++ return (ISC_R_SUCCESS);
++ }
+ bitcnt = bytecnt * 8;
+ for (i = 0; i < bytecnt; i++) {
+ top = data[i];
+@@ -976,26 +978,41 @@ pk11_numbits(CK_BYTE_PTR data, unsigned int bytecnt) {
+ bitcnt -= 8;
+ continue;
+ }
+- if (top & 0x80)
+- return (bitcnt);
+- if (top & 0x40)
+- return (bitcnt - 1);
+- if (top & 0x20)
+- return (bitcnt - 2);
+- if (top & 0x10)
+- return (bitcnt - 3);
+- if (top & 0x08)
+- return (bitcnt - 4);
+- if (top & 0x04)
+- return (bitcnt - 5);
+- if (top & 0x02)
+- return (bitcnt - 6);
+- if (top & 0x01)
+- return (bitcnt - 7);
++ if (top & 0x80) {
++ *bits = bitcnt;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x40) {
++ *bits = bitcnt - 1;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x20) {
++ *bits = bitcnt - 2;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x10) {
++ *bits = bitcnt - 3;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x08) {
++ *bits = bitcnt - 4;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x04) {
++ *bits = bitcnt - 5;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x02) {
++ *bits = bitcnt - 6;
++ return (ISC_R_SUCCESS);
++ }
++ if (top & 0x01) {
++ *bits = bitcnt - 7;
++ return (ISC_R_SUCCESS);
++ }
+ break;
+ }
+- INSIST(0);
+- ISC_UNREACHABLE();
++ return (ISC_R_RANGE);
+ }
+
+ CK_ATTRIBUTE *
+--
+2.17.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2020-8624.patch b/meta/recipes-connectivity/bind/bind/CVE-2020-8624.patch
new file mode 100644
index 0000000000..9cffe358bf
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2020-8624.patch
@@ -0,0 +1,33 @@
+From a73c3d30de7fe98af9e4dc0e490f732a48412380 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Wed, 29 Jul 2020 23:36:03 +1000
+Subject: [PATCH] bind: Update-policy 'subdomain' was incorrectly treated as
+ 'zonesub'
+
+resulting in names outside the specified subdomain having the wrong
+restrictions for the given key.
+
+Upstream-Status: Backport
+CVE: CVE-2020-8624
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ bin/named/zoneconf.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/bin/named/zoneconf.c b/bin/named/zoneconf.c
+index e237bdb..4898447 100644
+--- a/bin/named/zoneconf.c
++++ b/bin/named/zoneconf.c
+@@ -237,7 +237,8 @@ configure_zone_ssutable(const cfg_obj_t *zconfig, dns_zone_t *zone,
+
+ str = cfg_obj_asstring(matchtype);
+ CHECK(dns_ssu_mtypefromstring(str, &mtype));
+- if (mtype == dns_ssumatchtype_subdomain) {
++ if (mtype == dns_ssumatchtype_subdomain &&
++ strcasecmp(str, "zonesub") == 0) {
+ usezone = true;
+ }
+
+--
+1.9.1
+
diff --git a/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch b/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch
index 37e210e6da..84559e5f37 100644
--- a/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch
+++ b/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch
@@ -1,4 +1,4 @@
-From 9473d29843579802e96b0293a3e953fed93de82c Mon Sep 17 00:00:00 2001
+From edda20fb5a6e88548f85e39d34d6c074306e15bc Mon Sep 17 00:00:00 2001
From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Tue, 9 Jun 2015 11:22:00 -0400
Subject: [PATCH] bind: ensure searching for json headers searches sysroot
@@ -27,15 +27,16 @@ to make use of the combination some day.
Upstream-Status: Inappropriate [OE Specific]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+
---
- configure.in | 2 +-
+ configure.ac | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-Index: bind-9.11.3/configure.in
-===================================================================
---- bind-9.11.3.orig/configure.in
-+++ bind-9.11.3/configure.in
-@@ -2574,7 +2574,7 @@ case "$use_libjson" in
+diff --git a/configure.ac b/configure.ac
+index 17392fd..e85a5c6 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -2449,7 +2449,7 @@ case "$use_libjson" in
libjson_libs=""
;;
auto|yes)
diff --git a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb b/meta/recipes-connectivity/bind/bind_9.11.19.bb
index b0bb64b7c7..d4467b0b48 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.5-P4.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.19.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "http://www.isc.org/sw/bind/"
SECTION = "console/network"
LICENSE = "ISC & BSD"
-LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=8f17f64e47e83b60cd920a1e4b54419e"
+LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=bf39058a7f64b2a934ce14dc9ec1dd45"
DEPENDS = "openssl libcap zlib"
@@ -15,23 +15,15 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://make-etc-initd-bind-stop-work.patch \
file://init.d-add-support-for-read-only-rootfs.patch \
file://bind-ensure-searching-for-json-headers-searches-sysr.patch \
- file://0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch \
- file://0001-lib-dns-gen.c-fix-too-long-error.patch \
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
file://0001-avoid-start-failure-with-bind-user.patch \
- file://0001-bind-fix-CVE-2019-6471.patch \
- file://0001-fix-enforcement-of-tcp-clients-v1.patch \
- file://0002-tcp-clients-could-still-be-exceeded-v2.patch \
- file://0003-use-reference-counter-for-pipeline-groups-v3.patch \
- file://0004-better-tcpquota-accounting-and-client-mortality-chec.patch \
- file://0005-refactor-tcpquota-and-pipeline-refs-allow-special-ca.patch \
- file://0006-restore-allowance-for-tcp-clients-interfaces.patch \
- file://0007-Replace-atomic-operations-in-bin-named-client.c-with.patch \
-"
-
-SRC_URI[md5sum] = "8ddab4b61fa4516fe404679c74e37960"
-SRC_URI[sha256sum] = "7e8c08192bcbaeb6e9f2391a70e67583b027b90e8c4bc1605da6eb126edde434"
+ file://CVE-2020-8622.patch \
+ file://CVE-2020-8623.patch \
+ file://CVE-2020-8624.patch \
+ "
+
+SRC_URI[sha256sum] = "0dee554a4caa368948b32da9a0c97b516c19103bc13ff5b3762c5d8552f52329"
UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/"
# stay at 9.11 until 9.16, from 9.16 follow the ESV versions divisible by 4
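With the upgrade to 9.11.19 the md5sum line is dropped and only the sha256 checksum is kept. A quick way to sanity-check a locally fetched tarball against the recipe (a suggested check, not part of this change; the downloads/ path is illustrative and depends on DL_DIR) might be:

    sha256sum downloads/bind-9.11.19.tar.gz
    # expected: 0dee554a4caa368948b32da9a0c97b516c19103bc13ff5b3762c5d8552f52329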
diff --git a/meta/recipes-connectivity/bluez5/bluez5.inc b/meta/recipes-connectivity/bluez5/bluez5.inc
index f582a07e22..75fc2dbf4c 100644
--- a/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -58,6 +58,8 @@ SRC_URI = "\
file://CVE-2018-10910.patch \
file://gcc9-fixes.patch \
file://0001-tools-Fix-build-after-y2038-changes-in-glibc.patch \
+ file://CVE-2020-0556-1.patch \
+ file://CVE-2020-0556-2.patch \
"
S = "${WORKDIR}/bluez-${PV}"
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-1.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-1.patch
new file mode 100644
index 0000000000..a6bf31e14b
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-1.patch
@@ -0,0 +1,35 @@
+From 8cdbd3b09f29da29374e2f83369df24228da0ad1 Mon Sep 17 00:00:00 2001
+From: Alain Michaud <alainm@chromium.org>
+Date: Tue, 10 Mar 2020 02:35:16 +0000
+Subject: [PATCH 1/2] HOGP must only accept data from bonded devices.
+
+HOGP 1.0 Section 6.1 establishes that the HOGP must require bonding.
+
+Reference:
+https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.htm
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/?id=8cdbd3b09f29da29374e2f83369df24228da0ad1]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+CVE: CVE-2020-0556
+---
+ profiles/input/hog.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/profiles/input/hog.c b/profiles/input/hog.c
+index 83c017dcb..dfac68921 100644
+--- a/profiles/input/hog.c
++++ b/profiles/input/hog.c
+@@ -186,6 +186,10 @@ static int hog_accept(struct btd_service *service)
+ return -EINVAL;
+ }
+
++ /* HOGP 1.0 Section 6.1 requires bonding */
++ if (!device_is_bonded(device, btd_device_get_bdaddr_type(device)))
++ return -ECONNREFUSED;
++
+ /* TODO: Replace GAttrib with bt_gatt_client */
+ bt_hog_attach(dev->hog, attrib);
+
+--
+2.24.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-2.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-2.patch
new file mode 100644
index 0000000000..8acb2f15ec
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2020-0556-2.patch
@@ -0,0 +1,143 @@
+From 3cccdbab2324086588df4ccf5f892fb3ce1f1787 Mon Sep 17 00:00:00 2001
+From: Alain Michaud <alainm@chromium.org>
+Date: Tue, 10 Mar 2020 02:35:18 +0000
+Subject: [PATCH 2/2] HID accepts bonded device connections only.
+
+This change adds a configuration for platforms to choose a more secure
+posture for the HID profile. While some older mice are known to not
+support pairing or encryption, some platforms may choose a more secure
+posture by requiring the device to be bonded and require the
+connection to be encrypted when bonding is required.
+
+Reference:
+https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/?id=3cccdbab2324086588df4ccf5f892fb3ce1f1787]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+CVE: CVE-2020-0556
+
+---
+ profiles/input/device.c | 23 ++++++++++++++++++++++-
+ profiles/input/device.h | 1 +
+ profiles/input/input.conf | 8 ++++++++
+ profiles/input/manager.c | 13 ++++++++++++-
+ 4 files changed, 43 insertions(+), 2 deletions(-)
+
+diff --git a/profiles/input/device.c b/profiles/input/device.c
+index 2cb3811c8..d89da2d7c 100644
+--- a/profiles/input/device.c
++++ b/profiles/input/device.c
+@@ -92,6 +92,7 @@ struct input_device {
+
+ static int idle_timeout = 0;
+ static bool uhid_enabled = false;
++static bool classic_bonded_only = false;
+
+ void input_set_idle_timeout(int timeout)
+ {
+@@ -103,6 +104,11 @@ void input_enable_userspace_hid(bool state)
+ uhid_enabled = state;
+ }
+
++void input_set_classic_bonded_only(bool state)
++{
++ classic_bonded_only = state;
++}
++
+ static void input_device_enter_reconnect_mode(struct input_device *idev);
+ static int connection_disconnect(struct input_device *idev, uint32_t flags);
+
+@@ -970,8 +976,18 @@ static int hidp_add_connection(struct input_device *idev)
+ if (device_name_known(idev->device))
+ device_get_name(idev->device, req->name, sizeof(req->name));
+
++ /* Make sure the device is bonded if required */
++ if (classic_bonded_only && !device_is_bonded(idev->device,
++ btd_device_get_bdaddr_type(idev->device))) {
++ error("Rejected connection from !bonded device %s", dst_addr);
++ goto cleanup;
++ }
++
+ /* Encryption is mandatory for keyboards */
+- if (req->subclass & 0x40) {
++ /* Some platforms may choose to require encryption for all devices */
++ /* Note that this only matters for pre 2.1 devices as otherwise the */
++ /* device is encrypted by default by the lower layers */
++ if (classic_bonded_only || req->subclass & 0x40) {
+ if (!bt_io_set(idev->intr_io, &gerr,
+ BT_IO_OPT_SEC_LEVEL, BT_IO_SEC_MEDIUM,
+ BT_IO_OPT_INVALID)) {
+@@ -1203,6 +1219,11 @@ static void input_device_enter_reconnect_mode(struct input_device *idev)
+ DBG("path=%s reconnect_mode=%s", idev->path,
+ reconnect_mode_to_string(idev->reconnect_mode));
+
++ /* Make sure the device is bonded if required */
++ if (classic_bonded_only && !device_is_bonded(idev->device,
++ btd_device_get_bdaddr_type(idev->device)))
++ return;
++
+ /* Only attempt an auto-reconnect when the device is required to
+ * accept reconnections from the host.
+ */
+diff --git a/profiles/input/device.h b/profiles/input/device.h
+index 51a9aee18..3044db673 100644
+--- a/profiles/input/device.h
++++ b/profiles/input/device.h
+@@ -29,6 +29,7 @@ struct input_conn;
+
+ void input_set_idle_timeout(int timeout);
+ void input_enable_userspace_hid(bool state);
++void input_set_classic_bonded_only(bool state);
+
+ int input_device_register(struct btd_service *service);
+ void input_device_unregister(struct btd_service *service);
+diff --git a/profiles/input/input.conf b/profiles/input/input.conf
+index 3e1d65aae..166aff4a4 100644
+--- a/profiles/input/input.conf
++++ b/profiles/input/input.conf
+@@ -11,3 +11,11 @@
+ # Enable HID protocol handling in userspace input profile
+ # Defaults to false (HIDP handled in HIDP kernel module)
+ #UserspaceHID=true
++
++# Limit HID connections to bonded devices
++# The HID Profile does not specify that devices must be bonded, however some
++# platforms may want to make sure that input connections only come from bonded
++# device connections. Several older mice have been known for not supporting
++# pairing/encryption.
++# Defaults to false to maximize device compatibility.
++#ClassicBondedOnly=true
+diff --git a/profiles/input/manager.c b/profiles/input/manager.c
+index 1d31b0652..5cd27b839 100644
+--- a/profiles/input/manager.c
++++ b/profiles/input/manager.c
+@@ -96,7 +96,7 @@ static int input_init(void)
+ config = load_config_file(CONFIGDIR "/input.conf");
+ if (config) {
+ int idle_timeout;
+- gboolean uhid_enabled;
++ gboolean uhid_enabled, classic_bonded_only;
+
+ idle_timeout = g_key_file_get_integer(config, "General",
+ "IdleTimeout", &err);
+@@ -114,6 +114,17 @@ static int input_init(void)
+ input_enable_userspace_hid(uhid_enabled);
+ } else
+ g_clear_error(&err);
++
++ classic_bonded_only = g_key_file_get_boolean(config, "General",
++ "ClassicBondedOnly", &err);
++
++ if (!err) {
++ DBG("input.conf: ClassicBondedOnly=%s",
++ classic_bonded_only ? "true" : "false");
++ input_set_classic_bonded_only(classic_bonded_only);
++ } else
++ g_clear_error(&err);
++
+ }
+
+ btd_profile_register(&input_profile);
+--
+2.24.1
+
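The stricter HID behaviour introduced by this backport is opt-in. A platform that wants it would enable the new key in BlueZ's input.conf (read from CONFIGDIR, typically /etc/bluetooth/input.conf); a minimal sketch of that configuration:

    [General]
    ClassicBondedOnly=true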
diff --git a/meta/recipes-connectivity/dhcp/dhcp/0001-Ensure-context-is-running-prior-to-calling-isc_app_c.patch b/meta/recipes-connectivity/dhcp/dhcp/0001-Ensure-context-is-running-prior-to-calling-isc_app_c.patch
new file mode 100644
index 0000000000..34b2ae1e5c
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/0001-Ensure-context-is-running-prior-to-calling-isc_app_c.patch
@@ -0,0 +1,165 @@
+From f369dbb9e67eb5ef336944af63039b6d8f838384 Mon Sep 17 00:00:00 2001
+From: Thomas Markwalder <tmark@isc.org>
+Date: Thu, 12 Sep 2019 10:35:46 -0400
+Subject: [PATCH 1/3] Ensure context is running prior to calling
+ isc_app_ctxsuspend
+
+Add a release note.
+
+includes/omapip/isclib.h
+ Added actx_running flag to global context, dhcp_gbl_ctx
+
+omapip/isclib.c
+ set_ctx_running() - new function used as the ctxonrun callback
+
+ dhcp_context_create() - installs set_ctx_running callback
+
+ dhcp_signal_handler() - modified to use the actx_running flag to
+ determine if the context is running and should be suspended
+
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/dhcp.git]
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ RELNOTES | 7 +++++
+ includes/omapip/isclib.h | 3 ++-
+ omapip/isclib.c | 57 +++++++++++++++++++++++++++++++++-------
+ 3 files changed, 57 insertions(+), 10 deletions(-)
+
+diff --git a/RELNOTES b/RELNOTES
+index f10305d..1730473 100644
+--- a/RELNOTES
++++ b/RELNOTES
+@@ -6,6 +6,13 @@
+
+ NEW FEATURES
+
++- Closed a small window of time between the installation of graceful
++ shutdown signal handlers and application context startup, during which
++ the receipt of shutdown signal would cause a REQUIRE() assertion to
++ occur. Note this issue is only visible when compiling with
++ ENABLE_GENTLE_SHUTDOWN defined.
++ [Gitlab #53,!18 git TBD]
++
+ Please note that that ISC DHCP is now licensed under the Mozilla Public License,
+ MPL 2.0. Please see https://www.mozilla.org/en-US/MPL/2.0/ to read the MPL 2.0
+ license terms.
+diff --git a/includes/omapip/isclib.h b/includes/omapip/isclib.h
+index 6c20584..af6a6fc 100644
+--- a/includes/omapip/isclib.h
++++ b/includes/omapip/isclib.h
+@@ -94,7 +94,8 @@
+ typedef struct dhcp_context {
+ isc_mem_t *mctx;
+ isc_appctx_t *actx;
+- int actx_started;
++ int actx_started; // ISC_TRUE if ctxstart has been called
++ int actx_running; // ISC_TRUE if ctxrun has been called
+ isc_taskmgr_t *taskmgr;
+ isc_task_t *task;
+ isc_socketmgr_t *socketmgr;
+diff --git a/omapip/isclib.c b/omapip/isclib.c
+index ce4b4a1..73e017c 100644
+--- a/omapip/isclib.c
++++ b/omapip/isclib.c
+@@ -134,6 +134,35 @@ handle_signal(int sig, void (*handler)(int)) {
+ }
+ }
+
++/* Callback passed to isc_app_ctxonrun
++ *
++ * BIND9 context code will invoke this handler once the context has
++ * entered the running state. We use it to set a global marker so that
++ * we can tell if the context is running. Several of the isc_app_
++ * calls REQUIRE that the context is running and we need a way to
++ * know that.
++ *
++ * We also check to see if we received a shutdown signal prior to
++ * the context entering the run state. If we did, then we can just
++ * simply shut the context down now. This closes the relatively
++ * small window between start up and entering run via the call
++ * to dispatch().
++ *
++ */
++static void
++set_ctx_running(isc_task_t *task, isc_event_t *event) {
++ task = task; // unused;
++ dhcp_gbl_ctx.actx_running = ISC_TRUE;
++
++ if (shutdown_signal) {
++ // We got signaled shutdown before we entered running state.
++ // Now that we've reached running state, shut'er down.
++ isc_app_ctxsuspend(dhcp_gbl_ctx.actx);
++ }
++
++ isc_event_free(&event);
++}
++
+ isc_result_t
+ dhcp_context_create(int flags,
+ struct in_addr *local4,
+@@ -141,6 +170,9 @@ dhcp_context_create(int flags,
+ isc_result_t result;
+
+ if ((flags & DHCP_CONTEXT_PRE_DB) != 0) {
++ dhcp_gbl_ctx.actx_started = ISC_FALSE;
++ dhcp_gbl_ctx.actx_running = ISC_FALSE;
++
+ /*
+ * Set up the error messages, this isn't the right place
+ * for this call but it is convienent for now.
+@@ -204,15 +236,24 @@ dhcp_context_create(int flags,
+ if (result != ISC_R_SUCCESS)
+ goto cleanup;
+
+- result = isc_task_create(dhcp_gbl_ctx.taskmgr, 0, &dhcp_gbl_ctx.task);
++ result = isc_task_create(dhcp_gbl_ctx.taskmgr, 0,
++ &dhcp_gbl_ctx.task);
+ if (result != ISC_R_SUCCESS)
+ goto cleanup;
+
+ result = isc_app_ctxstart(dhcp_gbl_ctx.actx);
+ if (result != ISC_R_SUCCESS)
+- return (result);
++ goto cleanup;
++
+ dhcp_gbl_ctx.actx_started = ISC_TRUE;
+
++ // Install the onrun callback.
++ result = isc_app_ctxonrun(dhcp_gbl_ctx.actx, dhcp_gbl_ctx.mctx,
++ dhcp_gbl_ctx.task, set_ctx_running,
++ dhcp_gbl_ctx.actx);
++ if (result != ISC_R_SUCCESS)
++ goto cleanup;
++
+ /* Not all OSs support suppressing SIGPIPE through socket
+ * options, so set the sigal action to be ignore. This allows
+ * broken connections to fail gracefully with EPIPE on writes */
+@@ -335,19 +376,17 @@ isclib_make_dst_key(char *inname,
+ * @param signal signal code that we received
+ */
+ void dhcp_signal_handler(int signal) {
+- isc_appctx_t *ctx = dhcp_gbl_ctx.actx;
+- int prev = shutdown_signal;
+-
+- if (prev != 0) {
++ if (shutdown_signal != 0) {
+ /* Already in shutdown. */
+ return;
+ }
++
+ /* Possible race but does it matter? */
+ shutdown_signal = signal;
+
+- /* Use reload (aka suspend) for easier dispatch() reenter. */
+- if (ctx && ctx->methods && ctx->methods->ctxsuspend) {
+- (void) isc_app_ctxsuspend(ctx);
++ /* If the application context is running tell it to shut down */
++ if (dhcp_gbl_ctx.actx_running == ISC_TRUE) {
++ (void) isc_app_ctxsuspend(dhcp_gbl_ctx.actx);
+ }
+ }
+
+--
+2.23.0
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp/0002-Added-shutdown-log-statment-to-dhcrelay.patch b/meta/recipes-connectivity/dhcp/dhcp/0002-Added-shutdown-log-statment-to-dhcrelay.patch
new file mode 100644
index 0000000000..78b2b74f45
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/0002-Added-shutdown-log-statment-to-dhcrelay.patch
@@ -0,0 +1,29 @@
+From adcd34ae1f56b16d7e9696d980332b4cf6c7ce91 Mon Sep 17 00:00:00 2001
+From: Thomas Markwalder <tmark@isc.org>
+Date: Fri, 13 Sep 2019 15:03:31 -0400
+Subject: [PATCH 2/3] Added shutdown log statment to dhcrelay
+
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/dhcp.git]
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ relay/dhcrelay.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/relay/dhcrelay.c b/relay/dhcrelay.c
+index d8caaaf..4bd1d47 100644
+--- a/relay/dhcrelay.c
++++ b/relay/dhcrelay.c
+@@ -2076,6 +2076,9 @@ dhcp_set_control_state(control_object_state_t oldstate,
+ if (newstate != server_shutdown)
+ return ISC_R_SUCCESS;
+
++ /* Log shutdown on signal. */
++ log_info("Received signal %d, initiating shutdown.", shutdown_signal);
++
+ if (no_pid_file == ISC_FALSE)
+ (void) unlink(path_dhcrelay_pid);
+
+--
+2.23.0
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp/0003-Addressed-review-comment.patch b/meta/recipes-connectivity/dhcp/dhcp/0003-Addressed-review-comment.patch
new file mode 100644
index 0000000000..a51b6cf526
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/0003-Addressed-review-comment.patch
@@ -0,0 +1,31 @@
+From e4b54b4d676783152d487103714cba2913661ef8 Mon Sep 17 00:00:00 2001
+From: Thomas Markwalder <tmark@isc.org>
+Date: Wed, 6 Nov 2019 15:53:50 -0500
+Subject: [PATCH 3/3] Addressed review comment.
+
+omapip/isclib.c
+ Added use of IGNORE_UNUSED()
+
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/dhcp.git]
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ omapip/isclib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/omapip/isclib.c b/omapip/isclib.c
+index 73e017c..1d52463 100644
+--- a/omapip/isclib.c
++++ b/omapip/isclib.c
+@@ -151,7 +151,7 @@ handle_signal(int sig, void (*handler)(int)) {
+ */
+ static void
+ set_ctx_running(isc_task_t *task, isc_event_t *event) {
+- task = task; // unused;
++ IGNORE_UNUSED(task);
+ dhcp_gbl_ctx.actx_running = ISC_TRUE;
+
+ if (shutdown_signal) {
+--
+2.23.0
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp_4.4.1.bb b/meta/recipes-connectivity/dhcp/dhcp_4.4.1.bb
index 275961a603..ddc8b60254 100644
--- a/meta/recipes-connectivity/dhcp/dhcp_4.4.1.bb
+++ b/meta/recipes-connectivity/dhcp/dhcp_4.4.1.bb
@@ -11,6 +11,9 @@ SRC_URI += "file://0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.pat
file://0013-fixup_use_libbind.patch \
file://0001-master-Added-includes-of-new-BIND9-compatibility-hea.patch \
file://0001-Fix-a-NSUPDATE-compiling-issue.patch \
+ file://0001-Ensure-context-is-running-prior-to-calling-isc_app_c.patch \
+ file://0002-Added-shutdown-log-statment-to-dhcrelay.patch \
+ file://0003-Addressed-review-comment.patch \
"
SRC_URI[md5sum] = "18c7f4dcbb0a63df25098216d47b1ede"
diff --git a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
index 684fbe09e1..cc9410b94e 100644
--- a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
+++ b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
@@ -143,11 +143,15 @@ ALTERNATIVE_${PN}-traceroute = "traceroute"
ALTERNATIVE_${PN}-hostname = "hostname"
ALTERNATIVE_LINK_NAME[hostname] = "${base_bindir}/hostname"
-ALTERNATIVE_${PN}-doc = "hostname.1 dnsdomainname.1 logger.1 syslogd.8"
+ALTERNATIVE_${PN}-doc = "hostname.1 dnsdomainname.1 logger.1 syslogd.8 \
+ tftpd.8 tftp.1 telnetd.8"
ALTERNATIVE_LINK_NAME[hostname.1] = "${mandir}/man1/hostname.1"
ALTERNATIVE_LINK_NAME[dnsdomainname.1] = "${mandir}/man1/dnsdomainname.1"
ALTERNATIVE_LINK_NAME[logger.1] = "${mandir}/man1/logger.1"
ALTERNATIVE_LINK_NAME[syslogd.8] = "${mandir}/man8/syslogd.8"
+ALTERNATIVE_LINK_NAME[telnetd.8] = "${mandir}/man8/telnetd.8"
+ALTERNATIVE_LINK_NAME[tftpd.8] = "${mandir}/man8/tftpd.8"
+ALTERNATIVE_LINK_NAME[tftp.1] = "${mandir}/man1/tftp.1"
ALTERNATIVE_${PN}-ifconfig = "ifconfig"
ALTERNATIVE_LINK_NAME[ifconfig] = "${base_sbindir}/ifconfig"
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Disable-statx-if-using-glibc-emulation.patch b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Disable-statx-if-using-glibc-emulation.patch
new file mode 100644
index 0000000000..98b1391923
--- /dev/null
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-Disable-statx-if-using-glibc-emulation.patch
@@ -0,0 +1,34 @@
+From ff3ad88c233ecd87f7983ad13836323f944540ec Mon Sep 17 00:00:00 2001
+From: Doug Nazar <nazard@nazar.ca>
+Date: Mon, 9 Dec 2019 10:53:37 -0500
+Subject: [PATCH] Disable statx if using glibc emulation
+
+On older kernels without statx, glibc with statx support will attempt
+to emulate the call. However it doesn't support AT_STATX_DONT_SYNC and
+will return EINVAL. This causes all xstat/xlstat calls to fail.
+
+Upstream-Status: Backport
+
+Signed-off-by: Doug Nazar <nazard@nazar.ca>
+Signed-off-by: Steve Dickson <steved@redhat.com>
+---
+ support/misc/xstat.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/support/misc/xstat.c b/support/misc/xstat.c
+index 661e29e4..a438fbcc 100644
+--- a/support/misc/xstat.c
++++ b/support/misc/xstat.c
+@@ -51,6 +51,9 @@ statx_do_stat(int fd, const char *pathname, struct stat *statbuf, int flags)
+ statx_copy(statbuf, &stxbuf);
+ return 0;
+ }
++ /* glibc emulation doesn't support AT_STATX_DONT_SYNC */
++ if (errno == EINVAL)
++ errno = ENOSYS;
+ if (errno == ENOSYS)
+ statx_supported = 0;
+ } else
+--
+2.19.1
+
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-statd-take-user-id-from-var-lib-nfs-sm.patch b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-statd-take-user-id-from-var-lib-nfs-sm.patch
new file mode 100644
index 0000000000..87f4f098e0
--- /dev/null
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-statd-take-user-id-from-var-lib-nfs-sm.patch
@@ -0,0 +1,102 @@
+From 12ee0ff1120a6e42b67cc90ad7d5006555e866c3 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 23 Jun 2020 09:22:22 +0000
+Subject: [PATCH] statd: take user-id from /var/lib/nfs/sm
+
+Having /var/lib/nfs writeable by statd is not ideal
+as there are files in there that statd doesn't need
+to access.
+After dropping privs, statd and sm-notify only need to
+access files in the directories sm and sm.bak.
+So take the uid for these deamons from 'sm'.
+
+Upstream-Status: Backport [https://git.linux-nfs.org/?p=steved/nfs-utils.git;a=commitdiff;h=fee2cc29e888f2ced6a76990923aef19d326dc0e]
+CVE: CVE-2019-3689
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Steve Dickson <steved@redhat.com>
+Signed-off-by: Wenlin Kang <wenlin.kang@windriver.com>
+---
+ support/nsm/file.c | 16 +++++-----------
+ utils/statd/sm-notify.man | 10 +++++++++-
+ utils/statd/statd.man | 10 +++++++++-
+ 3 files changed, 23 insertions(+), 13 deletions(-)
+
+diff --git a/support/nsm/file.c b/support/nsm/file.c
+index 0b66f12..f5b4480 100644
+--- a/support/nsm/file.c
++++ b/support/nsm/file.c
+@@ -388,23 +388,17 @@ nsm_drop_privileges(const int pidfd)
+
+ (void)umask(S_IRWXO);
+
+- /*
+- * XXX: If we can't stat dirname, or if dirname is owned by
+- * root, we should use "statduser" instead, which is set up
+- * by configure.ac. Nothing in nfs-utils seems to use
+- * "statduser," though.
+- */
+- if (lstat(nsm_base_dirname, &st) == -1) {
+- xlog(L_ERROR, "Failed to stat %s: %m", nsm_base_dirname);
+- return false;
+- }
+-
+ if (chdir(nsm_base_dirname) == -1) {
+ xlog(L_ERROR, "Failed to change working directory to %s: %m",
+ nsm_base_dirname);
+ return false;
+ }
+
++ if (lstat(NSM_MONITOR_DIR, &st) == -1) {
++ xlog(L_ERROR, "Failed to stat %s/%s: %m", nsm_base_dirname, NSM_MONITOR_DIR);
++ return false;
++ }
++
+ if (!prune_bounding_set())
+ return false;
+
+diff --git a/utils/statd/sm-notify.man b/utils/statd/sm-notify.man
+index cfe1e4b..addf5d3 100644
+--- a/utils/statd/sm-notify.man
++++ b/utils/statd/sm-notify.man
+@@ -190,7 +190,15 @@ by default.
+ After starting,
+ .B sm-notify
+ attempts to set its effective UID and GID to the owner
+-and group of this directory.
++and group of the subdirectory
++.B sm
++of this directory. After changing the effective ids,
++.B sm-notify
++only needs to access files in
++.B sm
++and
++.B sm.bak
++within the state-directory-path.
+ .TP
+ .BI -v " ipaddr " | " hostname
+ Specifies the network address from which to send reboot notifications,
+diff --git a/utils/statd/statd.man b/utils/statd/statd.man
+index 71d5846..6222701 100644
+--- a/utils/statd/statd.man
++++ b/utils/statd/statd.man
+@@ -259,7 +259,15 @@ by default.
+ After starting,
+ .B rpc.statd
+ attempts to set its effective UID and GID to the owner
+-and group of this directory.
++and group of the subdirectory
++.B sm
++of this directory. After changing the effective ids,
++.B rpc.statd
++only needs to access files in
++.B sm
++and
++.B sm.bak
++within the state-directory-path.
+ .TP
+ .BR -v ", " -V ", " --version
+ Causes
+--
+2.23.0
+
diff --git a/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb b/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
index 7e80354e4e..458e534864 100644
--- a/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
+++ b/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.1.bb
@@ -33,6 +33,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/nfs-utils/${PV}/nfs-utils-${PV}.tar.x
file://0001-Makefile.am-fix-undefined-function-for-libnsm.a.patch \
file://0001-Don-t-build-tools-with-CC_FOR_BUILD.patch \
file://0001-Fix-include-order-between-config.h-and-stat.h.patch \
+ file://0001-Disable-statx-if-using-glibc-emulation.patch \
+ file://0001-statd-take-user-id-from-var-lib-nfs-sm.patch \
"
SRC_URI_append_libc-glibc = " file://0001-configure.ac-Do-not-fatalize-Wmissing-prototypes.patch"
SRC_URI_append_libc-musl = " file://nfs-utils-musl-res_querydomain.patch"
diff --git a/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch b/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch
new file mode 100644
index 0000000000..e2930c3c7d
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch
@@ -0,0 +1,46 @@
+From 3cccc0a2ab597b8273bddf08e9a3cc5551d7e530 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 3 Jan 2020 03:02:26 +0000
+Subject: [PATCH] upstream: what bozo decided to use 2020 as a future date in a
+ regress
+
+test?
+
+OpenBSD-Regress-ID: 3b953df5a7e14081ff6cf495d4e8d40e153cbc3a
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/ff31f15773ee173502eec4d7861ec56f26bba381]
+
+[Dropped the script version and copyright year change at the top]
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ regress/cert-hostkey.sh | 2 +-
+ regress/cert-userkey.sh | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/regress/cert-hostkey.sh b/regress/cert-hostkey.sh
+index 3ce7779..74d5a53 100644
+--- a/regress/cert-hostkey.sh
++++ b/regress/cert-hostkey.sh
+@@ -248,7 +248,7 @@ test_one() {
+ test_one "user-certificate" failure "-n $HOSTS"
+ test_one "empty principals" success "-h"
+ test_one "wrong principals" failure "-h -n foo"
+-test_one "cert not yet valid" failure "-h -V20200101:20300101"
++test_one "cert not yet valid" failure "-h -V20300101:20320101"
+ test_one "cert expired" failure "-h -V19800101:19900101"
+ test_one "cert valid interval" success "-h -V-1w:+2w"
+ test_one "cert has constraints" failure "-h -Oforce-command=false"
+diff --git a/regress/cert-userkey.sh b/regress/cert-userkey.sh
+index 6849e99..de455b8 100644
+--- a/regress/cert-userkey.sh
++++ b/regress/cert-userkey.sh
+@@ -327,7 +327,7 @@ test_one() {
+ test_one "correct principal" success "-n ${USER}"
+ test_one "host-certificate" failure "-n ${USER} -h"
+ test_one "wrong principals" failure "-n foo"
+-test_one "cert not yet valid" failure "-n ${USER} -V20200101:20300101"
++test_one "cert not yet valid" failure "-n ${USER} -V20300101:20320101"
+ test_one "cert expired" failure "-n ${USER} -V19800101:19900101"
+ test_one "cert valid interval" success "-n ${USER} -V-1w:+2w"
+ test_one "wrong source-address" failure "-n ${USER} -Osource-address=10.0.0.0/8"
diff --git a/meta/recipes-connectivity/openssh/openssh_8.0p1.bb b/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
index 2ffbc9a95f..3d16f9d347 100644
--- a/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_8.0p1.bb
@@ -25,6 +25,7 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://sshd_check_keys \
file://add-test-support-for-busybox.patch \
file://0001-upstream-fix-integer-overflow-in-XMSS-private-key-pa.patch \
+ file://0001-upstream-what-bozo-decided-to-use-2020-as-a-future-d.patch \
"
SRC_URI[md5sum] = "bf050f002fe510e1daecd39044e1122d"
SRC_URI[sha256sum] = "bd943879e69498e8031eb6b7f44d08cdc37d59a7ab689aa0b437320c3481fd68"
diff --git a/meta/recipes-connectivity/openssl/openssl/CVE-2019-1551.patch b/meta/recipes-connectivity/openssl/openssl/CVE-2019-1551.patch
deleted file mode 100644
index 0cc19cb5f4..0000000000
--- a/meta/recipes-connectivity/openssl/openssl/CVE-2019-1551.patch
+++ /dev/null
@@ -1,758 +0,0 @@
-From 419102400a2811582a7a3d4a4e317d72e5ce0a8f Mon Sep 17 00:00:00 2001
-From: Andy Polyakov <appro@openssl.org>
-Date: Wed, 4 Dec 2019 12:48:21 +0100
-Subject: [PATCH] Fix an overflow bug in rsaz_512_sqr
-
-There is an overflow bug in the x64_64 Montgomery squaring procedure used in
-exponentiation with 512-bit moduli. No EC algorithms are affected. Analysis
-suggests that attacks against 2-prime RSA1024, 3-prime RSA1536, and DSA1024 as a
-result of this defect would be very difficult to perform and are not believed
-likely. Attacks against DH512 are considered just feasible. However, for an
-attack the target would have to re-use the DH512 private key, which is not
-recommended anyway. Also applications directly using the low level API
-BN_mod_exp may be affected if they use BN_FLG_CONSTTIME.
-
-CVE-2019-1551
-
-Reviewed-by: Paul Dale <paul.dale@oracle.com>
-Reviewed-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
-(Merged from https://github.com/openssl/openssl/pull/10575)
-
-CVE: CVE-2019-1551
-Upstream-Status: Backport
-Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
----
- crypto/bn/asm/rsaz-x86_64.pl | 381 ++++++++++++++++++-----------------
- 1 file changed, 197 insertions(+), 184 deletions(-)
-
-diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl
-index b1797b649f0..7534d5cd03e 100755
---- a/crypto/bn/asm/rsaz-x86_64.pl
-+++ b/crypto/bn/asm/rsaz-x86_64.pl
-@@ -116,7 +116,7 @@
- subq \$128+24, %rsp
- .cfi_adjust_cfa_offset 128+24
- .Lsqr_body:
-- movq $mod, %rbp # common argument
-+ movq $mod, %xmm1 # common off-load
- movq ($inp), %rdx
- movq 8($inp), %rax
- movq $n0, 128(%rsp)
-@@ -134,7 +134,8 @@
- .Loop_sqr:
- movl $times,128+8(%rsp)
- #first iteration
-- movq %rdx, %rbx
-+ movq %rdx, %rbx # 0($inp)
-+ mov %rax, %rbp # 8($inp)
- mulq %rdx
- movq %rax, %r8
- movq 16($inp), %rax
-@@ -173,31 +174,29 @@
- mulq %rbx
- addq %rax, %r14
- movq %rbx, %rax
-- movq %rdx, %r15
-- adcq \$0, %r15
-+ adcq \$0, %rdx
-
-- addq %r8, %r8 #shlq \$1, %r8
-- movq %r9, %rcx
-- adcq %r9, %r9 #shld \$1, %r8, %r9
-+ xorq %rcx,%rcx # rcx:r8 = r8 << 1
-+ addq %r8, %r8
-+ movq %rdx, %r15
-+ adcq \$0, %rcx
-
- mulq %rax
-- movq %rax, (%rsp)
-- addq %rdx, %r8
-- adcq \$0, %r9
-+ addq %r8, %rdx
-+ adcq \$0, %rcx
-
-- movq %r8, 8(%rsp)
-- shrq \$63, %rcx
-+ movq %rax, (%rsp)
-+ movq %rdx, 8(%rsp)
-
- #second iteration
-- movq 8($inp), %r8
- movq 16($inp), %rax
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r10
- movq 24($inp), %rax
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r11
- movq 32($inp), %rax
- adcq \$0, %rdx
-@@ -205,7 +204,7 @@
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r12
- movq 40($inp), %rax
- adcq \$0, %rdx
-@@ -213,7 +212,7 @@
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r13
- movq 48($inp), %rax
- adcq \$0, %rdx
-@@ -221,7 +220,7 @@
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r14
- movq 56($inp), %rax
- adcq \$0, %rdx
-@@ -229,39 +228,39 @@
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-- mulq %r8
-+ mulq %rbp
- addq %rax, %r15
-- movq %r8, %rax
-+ movq %rbp, %rax
- adcq \$0, %rdx
- addq %rbx, %r15
-- movq %rdx, %r8
-- movq %r10, %rdx
-- adcq \$0, %r8
-+ adcq \$0, %rdx
-
-- add %rdx, %rdx
-- lea (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
-- movq %r11, %rbx
-- adcq %r11, %r11 #shld \$1, %r10, %r11
-+ xorq %rbx, %rbx # rbx:r10:r9 = r10:r9 << 1
-+ addq %r9, %r9
-+ movq %rdx, %r8
-+ adcq %r10, %r10
-+ adcq \$0, %rbx
-
- mulq %rax
-+ addq %rcx, %rax
-+ movq 16($inp), %rbp
-+ adcq \$0, %rdx
- addq %rax, %r9
-+ movq 24($inp), %rax
- adcq %rdx, %r10
-- adcq \$0, %r11
-+ adcq \$0, %rbx
-
- movq %r9, 16(%rsp)
- movq %r10, 24(%rsp)
-- shrq \$63, %rbx
-
- #third iteration
-- movq 16($inp), %r9
-- movq 24($inp), %rax
-- mulq %r9
-+ mulq %rbp
- addq %rax, %r12
- movq 32($inp), %rax
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-- mulq %r9
-+ mulq %rbp
- addq %rax, %r13
- movq 40($inp), %rax
- adcq \$0, %rdx
-@@ -269,7 +268,7 @@
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-- mulq %r9
-+ mulq %rbp
- addq %rax, %r14
- movq 48($inp), %rax
- adcq \$0, %rdx
-@@ -277,9 +276,7 @@
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-- mulq %r9
-- movq %r12, %r10
-- lea (%rbx,%r12,2), %r12 #shld \$1, %rbx, %r12
-+ mulq %rbp
- addq %rax, %r15
- movq 56($inp), %rax
- adcq \$0, %rdx
-@@ -287,36 +284,40 @@
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-- mulq %r9
-- shrq \$63, %r10
-+ mulq %rbp
- addq %rax, %r8
-- movq %r9, %rax
-+ movq %rbp, %rax
- adcq \$0, %rdx
- addq %rcx, %r8
-- movq %rdx, %r9
-- adcq \$0, %r9
-+ adcq \$0, %rdx
-
-- movq %r13, %rcx
-- leaq (%r10,%r13,2), %r13 #shld \$1, %r12, %r13
-+ xorq %rcx, %rcx # rcx:r12:r11 = r12:r11 << 1
-+ addq %r11, %r11
-+ movq %rdx, %r9
-+ adcq %r12, %r12
-+ adcq \$0, %rcx
-
- mulq %rax
-+ addq %rbx, %rax
-+ movq 24($inp), %r10
-+ adcq \$0, %rdx
- addq %rax, %r11
-+ movq 32($inp), %rax
- adcq %rdx, %r12
-- adcq \$0, %r13
-+ adcq \$0, %rcx
-
- movq %r11, 32(%rsp)
- movq %r12, 40(%rsp)
-- shrq \$63, %rcx
-
- #fourth iteration
-- movq 24($inp), %r10
-- movq 32($inp), %rax
-+ mov %rax, %r11 # 32($inp)
- mulq %r10
- addq %rax, %r14
- movq 40($inp), %rax
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-+ mov %rax, %r12 # 40($inp)
- mulq %r10
- addq %rax, %r15
- movq 48($inp), %rax
-@@ -325,9 +326,8 @@
- movq %rdx, %rbx
- adcq \$0, %rbx
-
-+ mov %rax, %rbp # 48($inp)
- mulq %r10
-- movq %r14, %r12
-- leaq (%rcx,%r14,2), %r14 #shld \$1, %rcx, %r14
- addq %rax, %r8
- movq 56($inp), %rax
- adcq \$0, %rdx
-@@ -336,32 +336,33 @@
- adcq \$0, %rbx
-
- mulq %r10
-- shrq \$63, %r12
- addq %rax, %r9
- movq %r10, %rax
- adcq \$0, %rdx
- addq %rbx, %r9
-- movq %rdx, %r10
-- adcq \$0, %r10
-+ adcq \$0, %rdx
-
-- movq %r15, %rbx
-- leaq (%r12,%r15,2),%r15 #shld \$1, %r14, %r15
-+ xorq %rbx, %rbx # rbx:r13:r14 = r13:r14 << 1
-+ addq %r13, %r13
-+ movq %rdx, %r10
-+ adcq %r14, %r14
-+ adcq \$0, %rbx
-
- mulq %rax
-+ addq %rcx, %rax
-+ adcq \$0, %rdx
- addq %rax, %r13
-+ movq %r12, %rax # 40($inp)
- adcq %rdx, %r14
-- adcq \$0, %r15
-+ adcq \$0, %rbx
-
- movq %r13, 48(%rsp)
- movq %r14, 56(%rsp)
-- shrq \$63, %rbx
-
- #fifth iteration
-- movq 32($inp), %r11
-- movq 40($inp), %rax
- mulq %r11
- addq %rax, %r8
-- movq 48($inp), %rax
-+ movq %rbp, %rax # 48($inp)
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-@@ -369,97 +370,99 @@
- addq %rax, %r9
- movq 56($inp), %rax
- adcq \$0, %rdx
-- movq %r8, %r12
-- leaq (%rbx,%r8,2), %r8 #shld \$1, %rbx, %r8
- addq %rcx, %r9
- movq %rdx, %rcx
- adcq \$0, %rcx
-
-+ mov %rax, %r14 # 56($inp)
- mulq %r11
-- shrq \$63, %r12
- addq %rax, %r10
- movq %r11, %rax
- adcq \$0, %rdx
- addq %rcx, %r10
-- movq %rdx, %r11
-- adcq \$0, %r11
-+ adcq \$0, %rdx
-
-- movq %r9, %rcx
-- leaq (%r12,%r9,2), %r9 #shld \$1, %r8, %r9
-+ xorq %rcx, %rcx # rcx:r8:r15 = r8:r15 << 1
-+ addq %r15, %r15
-+ movq %rdx, %r11
-+ adcq %r8, %r8
-+ adcq \$0, %rcx
-
- mulq %rax
-+ addq %rbx, %rax
-+ adcq \$0, %rdx
- addq %rax, %r15
-+ movq %rbp, %rax # 48($inp)
- adcq %rdx, %r8
-- adcq \$0, %r9
-+ adcq \$0, %rcx
-
- movq %r15, 64(%rsp)
- movq %r8, 72(%rsp)
-- shrq \$63, %rcx
-
- #sixth iteration
-- movq 40($inp), %r12
-- movq 48($inp), %rax
- mulq %r12
- addq %rax, %r10
-- movq 56($inp), %rax
-+ movq %r14, %rax # 56($inp)
- movq %rdx, %rbx
- adcq \$0, %rbx
-
- mulq %r12
- addq %rax, %r11
- movq %r12, %rax
-- movq %r10, %r15
-- leaq (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
- adcq \$0, %rdx
-- shrq \$63, %r15
- addq %rbx, %r11
-- movq %rdx, %r12
-- adcq \$0, %r12
-+ adcq \$0, %rdx
-
-- movq %r11, %rbx
-- leaq (%r15,%r11,2), %r11 #shld \$1, %r10, %r11
-+ xorq %rbx, %rbx # rbx:r10:r9 = r10:r9 << 1
-+ addq %r9, %r9
-+ movq %rdx, %r12
-+ adcq %r10, %r10
-+ adcq \$0, %rbx
-
- mulq %rax
-+ addq %rcx, %rax
-+ adcq \$0, %rdx
- addq %rax, %r9
-+ movq %r14, %rax # 56($inp)
- adcq %rdx, %r10
-- adcq \$0, %r11
-+ adcq \$0, %rbx
-
- movq %r9, 80(%rsp)
- movq %r10, 88(%rsp)
-
- #seventh iteration
-- movq 48($inp), %r13
-- movq 56($inp), %rax
-- mulq %r13
-+ mulq %rbp
- addq %rax, %r12
-- movq %r13, %rax
-- movq %rdx, %r13
-- adcq \$0, %r13
-+ movq %rbp, %rax
-+ adcq \$0, %rdx
-
-- xorq %r14, %r14
-- shlq \$1, %rbx
-- adcq %r12, %r12 #shld \$1, %rbx, %r12
-- adcq %r13, %r13 #shld \$1, %r12, %r13
-- adcq %r14, %r14 #shld \$1, %r13, %r14
-+ xorq %rcx, %rcx # rcx:r12:r11 = r12:r11 << 1
-+ addq %r11, %r11
-+ movq %rdx, %r13
-+ adcq %r12, %r12
-+ adcq \$0, %rcx
-
- mulq %rax
-+ addq %rbx, %rax
-+ adcq \$0, %rdx
- addq %rax, %r11
-+ movq %r14, %rax # 56($inp)
- adcq %rdx, %r12
-- adcq \$0, %r13
-+ adcq \$0, %rcx
-
- movq %r11, 96(%rsp)
- movq %r12, 104(%rsp)
-
- #eighth iteration
-- movq 56($inp), %rax
-+ xorq %rbx, %rbx # rbx:r13 = r13 << 1
-+ addq %r13, %r13
-+ adcq \$0, %rbx
-+
- mulq %rax
-- addq %rax, %r13
-+ addq %rcx, %rax
- adcq \$0, %rdx
--
-- addq %rdx, %r14
--
-- movq %r13, 112(%rsp)
-- movq %r14, 120(%rsp)
-+ addq %r13, %rax
-+ adcq %rbx, %rdx
-
- movq (%rsp), %r8
- movq 8(%rsp), %r9
-@@ -469,6 +472,10 @@
- movq 40(%rsp), %r13
- movq 48(%rsp), %r14
- movq 56(%rsp), %r15
-+ movq %xmm1, %rbp
-+
-+ movq %rax, 112(%rsp)
-+ movq %rdx, 120(%rsp)
-
- call __rsaz_512_reduce
-
-@@ -500,9 +507,9 @@
- .Loop_sqrx:
- movl $times,128+8(%rsp)
- movq $out, %xmm0 # off-load
-- movq %rbp, %xmm1 # off-load
- #first iteration
- mulx %rax, %r8, %r9
-+ mov %rax, %rbx
-
- mulx 16($inp), %rcx, %r10
- xor %rbp, %rbp # cf=0, of=0
-@@ -510,40 +517,39 @@
- mulx 24($inp), %rax, %r11
- adcx %rcx, %r9
-
-- mulx 32($inp), %rcx, %r12
-+ .byte 0xc4,0x62,0xf3,0xf6,0xa6,0x20,0x00,0x00,0x00 # mulx 32($inp), %rcx, %r12
- adcx %rax, %r10
-
-- mulx 40($inp), %rax, %r13
-+ .byte 0xc4,0x62,0xfb,0xf6,0xae,0x28,0x00,0x00,0x00 # mulx 40($inp), %rax, %r13
- adcx %rcx, %r11
-
-- .byte 0xc4,0x62,0xf3,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($inp), %rcx, %r14
-+ mulx 48($inp), %rcx, %r14
- adcx %rax, %r12
- adcx %rcx, %r13
-
-- .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r15
-+ mulx 56($inp), %rax, %r15
- adcx %rax, %r14
- adcx %rbp, %r15 # %rbp is 0
-
-- mov %r9, %rcx
-- shld \$1, %r8, %r9
-- shl \$1, %r8
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-- adcx %rdx, %r8
-- mov 8($inp), %rdx
-- adcx %rbp, %r9
-+ mulx %rdx, %rax, $out
-+ mov %rbx, %rdx # 8($inp)
-+ xor %rcx, %rcx
-+ adox %r8, %r8
-+ adcx $out, %r8
-+ adox %rbp, %rcx
-+ adcx %rbp, %rcx
-
- mov %rax, (%rsp)
- mov %r8, 8(%rsp)
-
- #second iteration
-- mulx 16($inp), %rax, %rbx
-+ .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x10,0x00,0x00,0x00 # mulx 16($inp), %rax, %rbx
- adox %rax, %r10
- adcx %rbx, %r11
-
-- .byte 0xc4,0x62,0xc3,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r8
-+ mulx 24($inp), $out, %r8
- adox $out, %r11
-+ .byte 0x66
- adcx %r8, %r12
-
- mulx 32($inp), %rax, %rbx
-@@ -561,24 +567,25 @@
- .byte 0xc4,0x62,0xc3,0xf6,0x86,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r8
- adox $out, %r15
- adcx %rbp, %r8
-+ mulx %rdx, %rax, $out
- adox %rbp, %r8
-+ .byte 0x48,0x8b,0x96,0x10,0x00,0x00,0x00 # mov 16($inp), %rdx
-
-- mov %r11, %rbx
-- shld \$1, %r10, %r11
-- shld \$1, %rcx, %r10
--
-- xor %ebp,%ebp
-- mulx %rdx, %rax, %rcx
-- mov 16($inp), %rdx
-+ xor %rbx, %rbx
-+ adcx %rcx, %rax
-+ adox %r9, %r9
-+ adcx %rbp, $out
-+ adox %r10, %r10
- adcx %rax, %r9
-- adcx %rcx, %r10
-- adcx %rbp, %r11
-+ adox %rbp, %rbx
-+ adcx $out, %r10
-+ adcx %rbp, %rbx
-
- mov %r9, 16(%rsp)
- .byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00 # mov %r10, 24(%rsp)
-
- #third iteration
-- .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r9
-+ mulx 24($inp), $out, %r9
- adox $out, %r12
- adcx %r9, %r13
-
-@@ -586,7 +593,7 @@
- adox %rax, %r13
- adcx %rcx, %r14
-
-- mulx 40($inp), $out, %r9
-+ .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r9
- adox $out, %r14
- adcx %r9, %r15
-
-@@ -594,27 +601,28 @@
- adox %rax, %r15
- adcx %rcx, %r8
-
-- .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r9
-+ mulx 56($inp), $out, %r9
- adox $out, %r8
- adcx %rbp, %r9
-+ mulx %rdx, %rax, $out
- adox %rbp, %r9
-+ mov 24($inp), %rdx
-
-- mov %r13, %rcx
-- shld \$1, %r12, %r13
-- shld \$1, %rbx, %r12
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-+ xor %rcx, %rcx
-+ adcx %rbx, %rax
-+ adox %r11, %r11
-+ adcx %rbp, $out
-+ adox %r12, %r12
- adcx %rax, %r11
-- adcx %rdx, %r12
-- mov 24($inp), %rdx
-- adcx %rbp, %r13
-+ adox %rbp, %rcx
-+ adcx $out, %r12
-+ adcx %rbp, %rcx
-
- mov %r11, 32(%rsp)
-- .byte 0x4c,0x89,0xa4,0x24,0x28,0x00,0x00,0x00 # mov %r12, 40(%rsp)
-+ mov %r12, 40(%rsp)
-
- #fourth iteration
-- .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 32($inp), %rax, %rbx
-+ mulx 32($inp), %rax, %rbx
- adox %rax, %r14
- adcx %rbx, %r15
-
-@@ -629,25 +637,25 @@
- mulx 56($inp), $out, %r10
- adox $out, %r9
- adcx %rbp, %r10
-+ mulx %rdx, %rax, $out
- adox %rbp, %r10
-+ mov 32($inp), %rdx
-
-- .byte 0x66
-- mov %r15, %rbx
-- shld \$1, %r14, %r15
-- shld \$1, %rcx, %r14
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-+ xor %rbx, %rbx
-+ adcx %rcx, %rax
-+ adox %r13, %r13
-+ adcx %rbp, $out
-+ adox %r14, %r14
- adcx %rax, %r13
-- adcx %rdx, %r14
-- mov 32($inp), %rdx
-- adcx %rbp, %r15
-+ adox %rbp, %rbx
-+ adcx $out, %r14
-+ adcx %rbp, %rbx
-
- mov %r13, 48(%rsp)
- mov %r14, 56(%rsp)
-
- #fifth iteration
-- .byte 0xc4,0x62,0xc3,0xf6,0x9e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r11
-+ mulx 40($inp), $out, %r11
- adox $out, %r8
- adcx %r11, %r9
-
-@@ -658,18 +666,19 @@
- mulx 56($inp), $out, %r11
- adox $out, %r10
- adcx %rbp, %r11
-+ mulx %rdx, %rax, $out
-+ mov 40($inp), %rdx
- adox %rbp, %r11
-
-- mov %r9, %rcx
-- shld \$1, %r8, %r9
-- shld \$1, %rbx, %r8
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-+ xor %rcx, %rcx
-+ adcx %rbx, %rax
-+ adox %r15, %r15
-+ adcx %rbp, $out
-+ adox %r8, %r8
- adcx %rax, %r15
-- adcx %rdx, %r8
-- mov 40($inp), %rdx
-- adcx %rbp, %r9
-+ adox %rbp, %rcx
-+ adcx $out, %r8
-+ adcx %rbp, %rcx
-
- mov %r15, 64(%rsp)
- mov %r8, 72(%rsp)
-@@ -682,18 +691,19 @@
- .byte 0xc4,0x62,0xc3,0xf6,0xa6,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r12
- adox $out, %r11
- adcx %rbp, %r12
-+ mulx %rdx, %rax, $out
- adox %rbp, %r12
-+ mov 48($inp), %rdx
-
-- mov %r11, %rbx
-- shld \$1, %r10, %r11
-- shld \$1, %rcx, %r10
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-+ xor %rbx, %rbx
-+ adcx %rcx, %rax
-+ adox %r9, %r9
-+ adcx %rbp, $out
-+ adox %r10, %r10
- adcx %rax, %r9
-- adcx %rdx, %r10
-- mov 48($inp), %rdx
-- adcx %rbp, %r11
-+ adcx $out, %r10
-+ adox %rbp, %rbx
-+ adcx %rbp, %rbx
-
- mov %r9, 80(%rsp)
- mov %r10, 88(%rsp)
-@@ -703,31 +713,31 @@
- adox %rax, %r12
- adox %rbp, %r13
-
-- xor %r14, %r14
-- shld \$1, %r13, %r14
-- shld \$1, %r12, %r13
-- shld \$1, %rbx, %r12
--
-- xor %ebp, %ebp
-- mulx %rdx, %rax, %rdx
-- adcx %rax, %r11
-- adcx %rdx, %r12
-+ mulx %rdx, %rax, $out
-+ xor %rcx, %rcx
- mov 56($inp), %rdx
-- adcx %rbp, %r13
-+ adcx %rbx, %rax
-+ adox %r11, %r11
-+ adcx %rbp, $out
-+ adox %r12, %r12
-+ adcx %rax, %r11
-+ adox %rbp, %rcx
-+ adcx $out, %r12
-+ adcx %rbp, %rcx
-
- .byte 0x4c,0x89,0x9c,0x24,0x60,0x00,0x00,0x00 # mov %r11, 96(%rsp)
- .byte 0x4c,0x89,0xa4,0x24,0x68,0x00,0x00,0x00 # mov %r12, 104(%rsp)
-
- #eighth iteration
- mulx %rdx, %rax, %rdx
-- adox %rax, %r13
-- adox %rbp, %rdx
-+ xor %rbx, %rbx
-+ adcx %rcx, %rax
-+ adox %r13, %r13
-+ adcx %rbp, %rdx
-+ adox %rbp, %rbx
-+ adcx %r13, %rax
-+ adcx %rdx, %rbx
-
-- .byte 0x66
-- add %rdx, %r14
--
-- movq %r13, 112(%rsp)
-- movq %r14, 120(%rsp)
- movq %xmm0, $out
- movq %xmm1, %rbp
-
-@@ -741,6 +751,9 @@
- movq 48(%rsp), %r14
- movq 56(%rsp), %r15
-
-+ movq %rax, 112(%rsp)
-+ movq %rbx, 120(%rsp)
-+
- call __rsaz_512_reducex
-
- addq 64(%rsp), %r8
diff --git a/meta/recipes-connectivity/openssl/openssl/reproducible.patch b/meta/recipes-connectivity/openssl/openssl/reproducible.patch
new file mode 100644
index 0000000000..a24260c95d
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/reproducible.patch
@@ -0,0 +1,32 @@
+The value for perl_archname can vary depending on the host, e.g.
+x86_64-linux-gnu-thread-multi or x86_64-linux-thread-multi, which
+makes the ptest package non-reproducible. It's unused other than
+these references, so drop it.
+
+RP 2020/2/6
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: openssl-1.1.1d/Configure
+===================================================================
+--- openssl-1.1.1d.orig/Configure
++++ openssl-1.1.1d/Configure
+@@ -286,7 +286,7 @@ if (defined env($local_config_envname))
+ # Save away perl command information
+ $config{perl_cmd} = $^X;
+ $config{perl_version} = $Config{version};
+-$config{perl_archname} = $Config{archname};
++#$config{perl_archname} = $Config{archname};
+
+ $config{prefix}="";
+ $config{openssldir}="";
+@@ -2517,7 +2517,7 @@ _____
+ @{$config{perlargv}}), "\n";
+ print "\nPerl information:\n\n";
+ print ' ',$config{perl_cmd},"\n";
+- print ' ',$config{perl_version},' for ',$config{perl_archname},"\n";
++ print ' ',$config{perl_version},"\n";
+ }
+ if ($dump || $options) {
+ my $longest = 0;
diff --git a/meta/recipes-connectivity/openssl/openssl_1.1.1d.bb b/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb
index 458ae7daf4..c514fcd82a 100644
--- a/meta/recipes-connectivity/openssl/openssl_1.1.1d.bb
+++ b/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb
@@ -16,15 +16,14 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://0001-skip-test_symbol_presence.patch \
file://0001-buildinfo-strip-sysroot-and-debug-prefix-map-from-co.patch \
file://afalg.patch \
- file://CVE-2019-1551.patch \
+ file://reproducible.patch \
"
SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[md5sum] = "3be209000dbc7e1b95bcdf47980a3baa"
-SRC_URI[sha256sum] = "1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2"
+SRC_URI[sha256sum] = "ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46"
inherit lib_package multilib_header multilib_script ptest
MULTILIB_SCRIPTS = "${PN}-bin:${bindir}/c_rehash"
@@ -33,7 +32,7 @@ PACKAGECONFIG ?= ""
PACKAGECONFIG_class-native = ""
PACKAGECONFIG_class-nativesdk = ""
-PACKAGECONFIG[cryptodev-linux] = "enable-devcryptoeng,disable-devcryptoeng,cryptodev-linux"
+PACKAGECONFIG[cryptodev-linux] = "enable-devcryptoeng,disable-devcryptoeng,cryptodev-linux,,cryptodev-module"
B = "${WORKDIR}/build"
do_configure[cleandirs] = "${B}"
diff --git a/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch b/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch
new file mode 100644
index 0000000000..b7ba7ba643
--- /dev/null
+++ b/meta/recipes-connectivity/ppp/ppp/0001-pppd-Fix-bounds-check-in-EAP-code.patch
@@ -0,0 +1,47 @@
+From 8d7970b8f3db727fe798b65f3377fe6787575426 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Mon, 3 Feb 2020 15:53:28 +1100
+Subject: [PATCH] pppd: Fix bounds check in EAP code
+
+Given that we have just checked vallen < len, it can never be the case
+that vallen >= len + sizeof(rhostname). This fixes the check so we
+actually avoid overflowing the rhostname array.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+
+Upstream-Status: Backport
+[https://github.com/paulusmack/ppp/commit/8d7970b8f3db727fe798b65f3377fe6787575426]
+
+CVE: CVE-2020-8597
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ pppd/eap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/pppd/eap.c b/pppd/eap.c
+index 94407f5..1b93db0 100644
+--- a/pppd/eap.c
++++ b/pppd/eap.c
+@@ -1420,7 +1420,7 @@ int len;
+ }
+
+ /* Not so likely to happen. */
+- if (vallen >= len + sizeof (rhostname)) {
++ if (len - vallen >= sizeof (rhostname)) {
+ dbglog("EAP: trimming really long peer name down");
+ BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
+ rhostname[sizeof (rhostname) - 1] = '\0';
+@@ -1846,7 +1846,7 @@ int len;
+ }
+
+ /* Not so likely to happen. */
+- if (vallen >= len + sizeof (rhostname)) {
++ if (len - vallen >= sizeof (rhostname)) {
+ dbglog("EAP: trimming really long peer name down");
+ BCOPY(inp + vallen, rhostname, sizeof (rhostname) - 1);
+ rhostname[sizeof (rhostname) - 1] = '\0';
+--
+2.17.1
+
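For illustration, a minimal standalone C sketch of the bounds logic described in the commit message above; the buffer size and sample values are made up, only the two comparisons mirror the old and new checks in pppd/eap.c:

#include <stdio.h>

/* Illustrative stand-in for pppd's rhostname buffer size. */
#define RHOSTNAME_SIZE 256

/* Old check: since the caller has already ensured vallen < len,
 * vallen >= len + RHOSTNAME_SIZE can never be true, so the trimming
 * branch is unreachable and an oversized name can overflow the buffer. */
static int needs_trim_old(int len, int vallen)
{
    return vallen >= len + RHOSTNAME_SIZE;
}

/* New check: trim whenever the peer name (len - vallen bytes) would not
 * fit into rhostname[RHOSTNAME_SIZE]. */
static int needs_trim_new(int len, int vallen)
{
    return len - vallen >= RHOSTNAME_SIZE;
}

int main(void)
{
    int len = 2000, vallen = 16; /* an oversized peer name */

    printf("old check trims: %d, new check trims: %d\n",
           needs_trim_old(len, vallen), needs_trim_new(len, vallen));
    /* prints: old check trims: 0, new check trims: 1 */
    return 0;
}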
diff --git a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
index 644cde4562..60c56dd0bd 100644
--- a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
+++ b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
@@ -33,6 +33,7 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/${BP}.tar.gz \
file://0001-pppoe-include-netinet-in.h-before-linux-in.h.patch \
file://0001-ppp-Remove-unneeded-include.patch \
file://ppp-2.4.7-DES-openssl.patch \
+ file://0001-pppd-Fix-bounds-check-in-EAP-code.patch \
"
SRC_URI_append_libc-musl = "\
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch
new file mode 100644
index 0000000000..53ad5d028a
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch
@@ -0,0 +1,151 @@
+From 5b78c8f961f25f4dc22d6f2b77ddd06d712cec63 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Wed, 3 Jun 2020 23:17:35 +0300
+Subject: [PATCH 1/3] WPS UPnP: Do not allow event subscriptions with URLs to
+ other networks
+
+The UPnP Device Architecture 2.0 specification errata ("UDA errata
+16-04-2020.docx") addresses a problem with notifications being allowed
+to go out to other domains by disallowing such cases. Do such filtering
+for the notification callback URLs to avoid undesired connections to
+external networks based on subscriptions that any device in the local
+network could request when WPS support for external registrars is
+enabled (the upnp_iface parameter in hostapd configuration).
+
+Upstream-Status: Backport
+CVE: CVE-2020-12695 patch #1
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/wps/wps_er.c | 2 +-
+ src/wps/wps_upnp.c | 38 ++++++++++++++++++++++++++++++++++++--
+ src/wps/wps_upnp_i.h | 3 ++-
+ 3 files changed, 39 insertions(+), 4 deletions(-)
+
+Index: wpa_supplicant-2.9/src/wps/wps_er.c
+===================================================================
+--- wpa_supplicant-2.9.orig/src/wps/wps_er.c
++++ wpa_supplicant-2.9/src/wps/wps_er.c
+@@ -1298,7 +1298,7 @@ wps_er_init(struct wps_context *wps, con
+ "with %s", filter);
+ }
+ if (get_netif_info(er->ifname, &er->ip_addr, &er->ip_addr_text,
+- er->mac_addr)) {
++ NULL, er->mac_addr)) {
+ wpa_printf(MSG_INFO, "WPS UPnP: Could not get IP/MAC address "
+ "for %s. Does it have IP address?", er->ifname);
+ wps_er_deinit(er, NULL, NULL);
+Index: wpa_supplicant-2.9/src/wps/wps_upnp.c
+===================================================================
+--- wpa_supplicant-2.9.orig/src/wps/wps_upnp.c
++++ wpa_supplicant-2.9/src/wps/wps_upnp.c
+@@ -303,6 +303,14 @@ static void subscr_addr_free_all(struct
+ }
+
+
++static int local_network_addr(struct upnp_wps_device_sm *sm,
++ struct sockaddr_in *addr)
++{
++ return (addr->sin_addr.s_addr & sm->netmask.s_addr) ==
++ (sm->ip_addr & sm->netmask.s_addr);
++}
++
++
+ /* subscr_addr_add_url -- add address(es) for one url to subscription */
+ static void subscr_addr_add_url(struct subscription *s, const char *url,
+ size_t url_len)
+@@ -381,6 +389,7 @@ static void subscr_addr_add_url(struct s
+
+ for (rp = result; rp; rp = rp->ai_next) {
+ struct subscr_addr *a;
++ struct sockaddr_in *addr = (struct sockaddr_in *) rp->ai_addr;
+
+ /* Limit no. of address to avoid denial of service attack */
+ if (dl_list_len(&s->addr_list) >= MAX_ADDR_PER_SUBSCRIPTION) {
+@@ -389,6 +398,13 @@ static void subscr_addr_add_url(struct s
+ break;
+ }
+
++ if (!local_network_addr(s->sm, addr)) {
++ wpa_printf(MSG_INFO,
++ "WPS UPnP: Ignore a delivery URL that points to another network %s",
++ inet_ntoa(addr->sin_addr));
++ continue;
++ }
++
+ a = os_zalloc(sizeof(*a) + alloc_len);
+ if (a == NULL)
+ break;
+@@ -889,11 +905,12 @@ static int eth_get(const char *device, u
+ * @net_if: Selected network interface name
+ * @ip_addr: Buffer for returning IP address in network byte order
+ * @ip_addr_text: Buffer for returning a pointer to allocated IP address text
++ * @netmask: Buffer for returning netmask or %NULL if not needed
+ * @mac: Buffer for returning MAC address
+ * Returns: 0 on success, -1 on failure
+ */
+ int get_netif_info(const char *net_if, unsigned *ip_addr, char **ip_addr_text,
+- u8 mac[ETH_ALEN])
++ struct in_addr *netmask, u8 mac[ETH_ALEN])
+ {
+ struct ifreq req;
+ int sock = -1;
+@@ -919,6 +936,19 @@ int get_netif_info(const char *net_if, u
+ in_addr.s_addr = *ip_addr;
+ os_snprintf(*ip_addr_text, 16, "%s", inet_ntoa(in_addr));
+
++ if (netmask) {
++ os_memset(&req, 0, sizeof(req));
++ os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name));
++ if (ioctl(sock, SIOCGIFNETMASK, &req) < 0) {
++ wpa_printf(MSG_ERROR,
++ "WPS UPnP: SIOCGIFNETMASK failed: %d (%s)",
++ errno, strerror(errno));
++ goto fail;
++ }
++ addr = (struct sockaddr_in *) &req.ifr_netmask;
++ netmask->s_addr = addr->sin_addr.s_addr;
++ }
++
+ #ifdef __linux__
+ os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name));
+ if (ioctl(sock, SIOCGIFHWADDR, &req) < 0) {
+@@ -1025,11 +1055,15 @@ static int upnp_wps_device_start(struct
+
+ /* Determine which IP and mac address we're using */
+ if (get_netif_info(net_if, &sm->ip_addr, &sm->ip_addr_text,
+- sm->mac_addr)) {
++ &sm->netmask, sm->mac_addr)) {
+ wpa_printf(MSG_INFO, "WPS UPnP: Could not get IP/MAC address "
+ "for %s. Does it have IP address?", net_if);
+ goto fail;
+ }
++ wpa_printf(MSG_DEBUG, "WPS UPnP: Local IP address %s netmask %s hwaddr "
++ MACSTR,
++ sm->ip_addr_text, inet_ntoa(sm->netmask),
++ MAC2STR(sm->mac_addr));
+
+ /* Listen for incoming TCP connections so that others
+ * can fetch our "xml files" from us.
+Index: wpa_supplicant-2.9/src/wps/wps_upnp_i.h
+===================================================================
+--- wpa_supplicant-2.9.orig/src/wps/wps_upnp_i.h
++++ wpa_supplicant-2.9/src/wps/wps_upnp_i.h
+@@ -128,6 +128,7 @@ struct upnp_wps_device_sm {
+ u8 mac_addr[ETH_ALEN]; /* mac addr of network i.f. we use */
+ char *ip_addr_text; /* IP address of network i.f. we use */
+ unsigned ip_addr; /* IP address of network i.f. we use (host order) */
++ struct in_addr netmask;
+ int multicast_sd; /* send multicast messages over this socket */
+ int ssdp_sd; /* receive discovery UPD packets on socket */
+ int ssdp_sd_registered; /* nonzero if we must unregister */
+@@ -158,7 +159,7 @@ struct subscription * subscription_find(
+ const u8 uuid[UUID_LEN]);
+ void subscr_addr_delete(struct subscr_addr *a);
+ int get_netif_info(const char *net_if, unsigned *ip_addr, char **ip_addr_text,
+- u8 mac[ETH_ALEN]);
++ struct in_addr *netmask, u8 mac[ETH_ALEN]);
+
+ /* wps_upnp_ssdp.c */
+ void msearchreply_state_machine_stop(struct advertisement_state_machine *a);
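As a rough standalone illustration of the same-subnet filter that local_network_addr() introduces (the addresses and netmask below are invented for the example, not taken from hostapd):

#include <stdio.h>
#include <arpa/inet.h>

/* Returns non-zero when addr is on the same IPv4 subnet as local/mask,
 * i.e. the test the patch applies to subscription callback URLs. */
static int same_subnet(in_addr_t addr, in_addr_t local, in_addr_t mask)
{
    return (addr & mask) == (local & mask);
}

int main(void)
{
    in_addr_t local = inet_addr("192.168.1.10");
    in_addr_t mask  = inet_addr("255.255.255.0");

    /* 1: delivery URL host in the local network, kept */
    printf("%d\n", same_subnet(inet_addr("192.168.1.200"), local, mask));
    /* 0: delivery URL host on another network, ignored */
    printf("%d\n", same_subnet(inet_addr("203.0.113.5"), local, mask));
    return 0;
}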
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch
new file mode 100644
index 0000000000..59640859dd
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch
@@ -0,0 +1,62 @@
+From f7d268864a2660b7239b9a8ff5ad37faeeb751ba Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Wed, 3 Jun 2020 22:41:02 +0300
+Subject: [PATCH 2/3] WPS UPnP: Fix event message generation using a long URL
+ path
+
+A URL of more than about 700 characters ended up overflowing the wpabuf used
+for building the event notification and this resulted in the wpabuf
+buffer overflow checks terminating the hostapd process. Fix this by
+allocating the buffer to be large enough to contain the full URL path.
+However, since that around 700 character limit has been the practical
+limit for more than ten years, start explicitly enforcing that as the
+limit for the callback URLs since any longer ones had not worked before
+and there is no need to enable them now either.
+
+Upstream-Status: Backport
+CVE: CVE-2020-12695 patch #2
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/wps/wps_upnp.c | 9 +++++++--
+ src/wps/wps_upnp_event.c | 3 ++-
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/src/wps/wps_upnp.c b/src/wps/wps_upnp.c
+index 7d4b7439940e..ab685d52ecab 100644
+--- a/src/wps/wps_upnp.c
++++ b/src/wps/wps_upnp.c
+@@ -328,9 +328,14 @@ static void subscr_addr_add_url(struct subscription *s, const char *url,
+ int rerr;
+ size_t host_len, path_len;
+
+- /* url MUST begin with http: */
+- if (url_len < 7 || os_strncasecmp(url, "http://", 7))
++ /* URL MUST begin with HTTP scheme. In addition, limit the length of
++ * the URL to 700 characters which is around the limit that was
++ * implicitly enforced for more than 10 years due to a bug in
++ * generating the event messages. */
++ if (url_len < 7 || os_strncasecmp(url, "http://", 7) || url_len > 700) {
++ wpa_printf(MSG_DEBUG, "WPS UPnP: Reject an unacceptable URL");
+ goto fail;
++ }
+ url += 7;
+ url_len -= 7;
+
+diff --git a/src/wps/wps_upnp_event.c b/src/wps/wps_upnp_event.c
+index d7e6edcc6503..08a23612f338 100644
+--- a/src/wps/wps_upnp_event.c
++++ b/src/wps/wps_upnp_event.c
+@@ -147,7 +147,8 @@ static struct wpabuf * event_build_message(struct wps_event_ *e)
+ struct wpabuf *buf;
+ char *b;
+
+- buf = wpabuf_alloc(1000 + wpabuf_len(e->data));
++ buf = wpabuf_alloc(1000 + os_strlen(e->addr->path) +
++ wpabuf_len(e->data));
+ if (buf == NULL)
+ return NULL;
+ wpabuf_printf(buf, "NOTIFY %s HTTP/1.1\r\n", e->addr->path);
+--
+2.20.1
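A small hedged sketch of the sizing idea behind this fix, in plain C rather than the wpabuf API: the event buffer is dimensioned from the callback URL path length instead of a fixed headroom (the 1000-byte constant mirrors the patch, everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a NOTIFY request into a heap buffer sized for the path, so a long
 * (but still acceptable) callback path cannot overflow it. */
static char *build_notify(const char *path, const char *payload)
{
    size_t need = 1000 + strlen(path) + strlen(payload);
    char *buf = malloc(need);

    if (buf == NULL)
        return NULL;
    snprintf(buf, need, "NOTIFY %s HTTP/1.1\r\n\r\n%s", path, payload);
    return buf;
}

int main(void)
{
    char longpath[701];
    char *msg;

    memset(longpath, 'a', 700);
    longpath[700] = '\0';

    msg = build_notify(longpath, "event-data");
    if (msg != NULL) {
        printf("built %zu byte message\n", strlen(msg));
        free(msg);
    }
    return 0;
}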
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch
new file mode 100644
index 0000000000..8a014ef28a
--- /dev/null
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch
@@ -0,0 +1,50 @@
+From 85aac526af8612c21b3117dadc8ef5944985b476 Mon Sep 17 00:00:00 2001
+From: Jouni Malinen <jouni@codeaurora.org>
+Date: Thu, 4 Jun 2020 21:24:04 +0300
+Subject: [PATCH 3/3] WPS UPnP: Handle HTTP initiation failures for events more
+ properly
+
+While it is appropriate to try to retransmit the event to another
+callback URL on a failure to initiate the HTTP client connection, there
+is no point in trying the exact same operation multiple times in a row.
+Replace the event_retry() calls with event_addr_failure() for these cases
+to avoid busy loops trying to repeat the same failing operation.
+
+These potential busy loops would go through eloop callbacks, so the
+process is not completely stuck on handling them, but unnecessary CPU
+would be used to process the continued retries that will keep failing
+for the same reason.
+
+Upstream-Status: Backport
+CVE: CVE-2020-12695 patch #3
+Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/wps/wps_upnp_event.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/wps/wps_upnp_event.c b/src/wps/wps_upnp_event.c
+index 08a23612f338..c0d9e41d9a38 100644
+--- a/src/wps/wps_upnp_event.c
++++ b/src/wps/wps_upnp_event.c
+@@ -294,7 +294,7 @@ static int event_send_start(struct subscription *s)
+
+ buf = event_build_message(e);
+ if (buf == NULL) {
+- event_retry(e, 0);
++ event_addr_failure(e);
+ return -1;
+ }
+
+@@ -302,7 +302,7 @@ static int event_send_start(struct subscription *s)
+ event_http_cb, e);
+ if (e->http_event == NULL) {
+ wpabuf_free(buf);
+- event_retry(e, 0);
++ event_addr_failure(e);
+ return -1;
+ }
+
+--
+2.20.1
diff --git a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
index 2db09ad2c6..de882fad55 100644
--- a/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
+++ b/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb
@@ -15,7 +15,7 @@ PACKAGECONFIG[openssl] = ",,openssl"
inherit pkgconfig systemd
-SYSTEMD_SERVICE_${PN} = "wpa_supplicant.service wpa_supplicant-nl80211@.service wpa_supplicant-wired@.service"
+SYSTEMD_SERVICE_${PN} = "wpa_supplicant.service"
SYSTEMD_AUTO_ENABLE = "disable"
SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
@@ -25,7 +25,10 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \
file://wpa_supplicant.conf-sane \
file://99_wpa_supplicant \
file://0001-replace-systemd-install-Alias-with-WantedBy.patch \
- file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \
+ file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \
+ file://0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch \
+ file://0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch \
+ file://0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch \
"
SRC_URI[md5sum] = "2d2958c782576dc9901092fbfecb4190"
SRC_URI[sha256sum] = "fcbdee7b4a64bea8177973299c8c824419c413ec2e3a95db63dd6a5dc3541f17"
@@ -37,13 +40,13 @@ S = "${WORKDIR}/wpa_supplicant-${PV}"
PACKAGES_prepend = "wpa-supplicant-passphrase wpa-supplicant-cli "
FILES_wpa-supplicant-passphrase = "${bindir}/wpa_passphrase"
FILES_wpa-supplicant-cli = "${sbindir}/wpa_cli"
-FILES_${PN} += "${datadir}/dbus-1/system-services/*"
+FILES_${PN} += "${datadir}/dbus-1/system-services/* ${systemd_system_unitdir}/*"
CONFFILES_${PN} += "${sysconfdir}/wpa_supplicant.conf"
do_configure () {
${MAKE} -C wpa_supplicant clean
install -m 0755 ${WORKDIR}/defconfig wpa_supplicant/.config
-
+
if echo "${PACKAGECONFIG}" | grep -qw "openssl"; then
ssl=openssl
elif echo "${PACKAGECONFIG}" | grep -qw "gnutls"; then
diff --git a/meta/recipes-core/busybox/busybox.inc b/meta/recipes-core/busybox/busybox.inc
index bf6ddae7d1..33c84bc2c1 100644
--- a/meta/recipes-core/busybox/busybox.inc
+++ b/meta/recipes-core/busybox/busybox.inc
@@ -431,6 +431,32 @@ fi
d.prependVar('pkg_postinst_%s' % pkg, postinst)
}
+pkg_postinst_${PN}_prepend () {
+	# Need path to saved utils, but they may have been removed on upgrade of busybox.
+ # Only use shell to get paths. Also capture if busybox was saved.
+ BUSYBOX=""
+ if [ "x$D" = "x" ] ; then
+ for busybox_rmdir in /tmp/busyboxrm-*; do
+ if [ "$busybox_rmdir" != '/tmp/busyboxrm-*' ] ; then
+ export PATH=$busybox_rmdir:$PATH
+ if [ -e $busybox_rmdir/busybox* ] ; then
+ BUSYBOX="$busybox_rmdir/busybox*"
+ fi
+ fi
+ done
+ fi
+}
+
+pkg_postinst_${PN}_append () {
+ # If busybox exists in the remove directory it is because it was the only shell left.
+ if [ "x$D" = "x" ] ; then
+ if [ "x$BUSYBOX" != "x" ] ; then
+ update-alternatives --remove sh $BUSYBOX
+ rm -f $BUSYBOX
+ fi
+ fi
+}
+
pkg_prerm_${PN} () {
# This is so you can make busybox commit suicide - removing busybox with no other packages
# providing its files, this will make update-alternatives work, but the update-rc.d part
@@ -451,9 +477,26 @@ pkg_prerm_${PN} () {
ln -s ${base_bindir}/busybox $tmpdir/grep
ln -s ${base_bindir}/busybox $tmpdir/tail
export PATH=$PATH:$tmpdir
+
+	# If busybox is the shell, we need to save it since it's the lowest-priority shell.
+	# Register the saved busybox as the lowest-priority shell possible as a backup.
+ if [ -n "$(readlink -f /bin/sh | grep busybox)" ] ; then
+ BUSYBOX=$(readlink -f /bin/sh)
+ cp $BUSYBOX $tmpdir/$(basename $BUSYBOX)
+ update-alternatives --install /bin/sh sh $tmpdir/$(basename $BUSYBOX) 1
+ fi
}
pkg_postrm_${PN} () {
+ # Add path to remove dir in case we removed our only grep
+ if [ "x$D" = "x" ] ; then
+ for busybox_rmdir in /tmp/busyboxrm-*; do
+ if [ "$busybox_rmdir" != '/tmp/busyboxrm-*' ] ; then
+ export PATH=$busybox_rmdir:$PATH
+ fi
+ done
+ fi
+
if grep -q "^${base_bindir}/bash$" $D${sysconfdir}/busybox.links* && [ ! -e $D${base_bindir}/bash ]; then
printf "$(grep -v "^${base_bindir}/bash$" $D${sysconfdir}/shells)\n" > $D${sysconfdir}/shells
fi
diff --git a/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch b/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch
new file mode 100644
index 0000000000..ac7a4b7a71
--- /dev/null
+++ b/meta/recipes-core/dbus/dbus/CVE-2020-12049.patch
@@ -0,0 +1,78 @@
+From 872b085f12f56da25a2dbd9bd0b2dff31d5aea63 Mon Sep 17 00:00:00 2001
+From: Simon McVittie <smcv@collabora.com>
+Date: Thu, 16 Apr 2020 14:45:11 +0100
+Subject: [PATCH] sysdeps-unix: On MSG_CTRUNC, close the fds we did receive
+
+MSG_CTRUNC indicates that we have received fewer fds than we should
+have done because the buffer was too small, but we were treating it
+as though it indicated that we received *no* fds. If we received any,
+we still have to make sure we close them, otherwise they will be leaked.
+
+On the system bus, if an attacker can induce us to leak fds in this
+way, that's a local denial of service via resource exhaustion.
+
+Reported-by: Kevin Backhouse, GitHub Security Lab
+Fixes: dbus#294
+Fixes: CVE-2020-12049
+Fixes: GHSL-2020-057
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/dbus/dbus/-/commit/872b085f12f56da25a2dbd9bd0b2dff31d5aea63]
+CVE: CVE-2020-12049
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ dbus/dbus-sysdeps-unix.c | 32 ++++++++++++++++++++------------
+ 1 file changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/dbus/dbus-sysdeps-unix.c b/dbus/dbus-sysdeps-unix.c
+index b5fc2466..b176dae1 100644
+--- a/dbus/dbus-sysdeps-unix.c
++++ b/dbus/dbus-sysdeps-unix.c
+@@ -435,18 +435,6 @@ _dbus_read_socket_with_unix_fds (DBusSocket fd,
+ struct cmsghdr *cm;
+ dbus_bool_t found = FALSE;
+
+- if (m.msg_flags & MSG_CTRUNC)
+- {
+- /* Hmm, apparently the control data was truncated. The bad
+- thing is that we might have completely lost a couple of fds
+- without chance to recover them. Hence let's treat this as a
+- serious error. */
+-
+- errno = ENOSPC;
+- _dbus_string_set_length (buffer, start);
+- return -1;
+- }
+-
+ for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
+ if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
+ {
+@@ -501,6 +489,26 @@ _dbus_read_socket_with_unix_fds (DBusSocket fd,
+ if (!found)
+ *n_fds = 0;
+
++ if (m.msg_flags & MSG_CTRUNC)
++ {
++ unsigned int i;
++
++ /* Hmm, apparently the control data was truncated. The bad
++ thing is that we might have completely lost a couple of fds
++ without chance to recover them. Hence let's treat this as a
++ serious error. */
++
++ /* We still need to close whatever fds we *did* receive,
++ * otherwise they'll never get closed. (CVE-2020-12049) */
++ for (i = 0; i < *n_fds; i++)
++ close (fds[i]);
++
++ *n_fds = 0;
++ errno = ENOSPC;
++ _dbus_string_set_length (buffer, start);
++ return -1;
++ }
++
+ /* put length back (doesn't actually realloc) */
+ _dbus_string_set_length (buffer, start + bytes_read);
+
+--
+2.25.1
+
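To show the shape of this cleanup in isolation, here is a minimal C sketch (the helper and the synthetic msghdr in main are illustrative, not the dbus code): when MSG_CTRUNC is set, any fds that were actually delivered are closed before the error is reported, so they cannot leak.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Close whatever fds were received before failing on truncated control data. */
static int handle_ctrunc(const struct msghdr *m, int *fds, unsigned int *n_fds)
{
    unsigned int i;

    if (m->msg_flags & MSG_CTRUNC)
    {
        for (i = 0; i < *n_fds; i++)
            close (fds[i]);
        *n_fds = 0;
        return -1; /* caller turns this into ENOSPC, as in the patch */
    }
    return 0;
}

int main(void)
{
    struct msghdr m = { 0 };
    int fds[2] = { -1, -1 };
    unsigned int n_fds = 0;

    m.msg_flags = MSG_CTRUNC; /* pretend the kernel truncated the control data */
    printf("handle_ctrunc: %d\n", handle_ctrunc(&m, fds, &n_fds));
    return 0;
}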
diff --git a/meta/recipes-core/dbus/dbus_1.12.16.bb b/meta/recipes-core/dbus/dbus_1.12.16.bb
index cfdbec09d0..92508cbeb8 100644
--- a/meta/recipes-core/dbus/dbus_1.12.16.bb
+++ b/meta/recipes-core/dbus/dbus_1.12.16.bb
@@ -16,6 +16,7 @@ SRC_URI = "https://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.gz \
file://tmpdir.patch \
file://dbus-1.init \
file://clear-guid_from_server-if-send_negotiate_unix_f.patch \
+ file://CVE-2020-12049.patch \
"
SRC_URI[md5sum] = "2dbeae80dfc9e3632320c6a53d5e8890"
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/0020-meson.build-do-not-hardcode-linux-as-the-host-system.patch b/meta/recipes-core/glib-2.0/glib-2.0/0020-meson.build-do-not-hardcode-linux-as-the-host-system.patch
new file mode 100644
index 0000000000..9c311f1c90
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/0020-meson.build-do-not-hardcode-linux-as-the-host-system.patch
@@ -0,0 +1,49 @@
+From d5e82cd0b6076f33b86e0285ef1c0dba8a14112e Mon Sep 17 00:00:00 2001
+From: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Date: Thu, 9 Jul 2020 13:00:16 +0200
+Subject: [PATCH] meson.build: do not hardcode 'linux' as the host system
+
+OE build system can set this to other values that include 'linux',
+e.g. 'linux-gnueabi'. This led to glib always being built without
+libmount, mkostemp and selinux support.
+
+Upstream-Status: Inappropriate [other]
+Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
+---
+ meson.build | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/meson.build b/meson.build
+index dd95c750b5ea..8bcacaf3c7e1 100644
+--- a/meson.build
++++ b/meson.build
+@@ -604,7 +604,7 @@ else
+ endif
+ message('Checking whether to use statfs or statvfs .. ' + stat_func_to_use)
+
+-if host_system == 'linux'
++if host_system.contains('linux')
+ if cc.has_function('mkostemp',
+ prefix: '''#define _GNU_SOURCE
+ #include <stdlib.h>''')
+@@ -1810,7 +1810,7 @@ glib_conf.set_quoted('GLIB_LOCALE_DIR', join_paths(glib_datadir, 'locale'))
+ # libmount is only used by gio, but we need to fetch the libs to generate the
+ # pkg-config file below
+ libmount_dep = []
+-if host_system == 'linux' and get_option('libmount')
++if host_system.contains('linux') and get_option('libmount')
+ libmount_dep = [dependency('mount', version : '>=2.23', required : true)]
+ glib_conf.set('HAVE_LIBMOUNT', 1)
+ endif
+@@ -1820,7 +1820,7 @@ if host_system == 'windows'
+ endif
+
+ selinux_dep = []
+-if host_system == 'linux'
++if host_system.contains('linux')
+ selinux_dep = dependency('libselinux', required: get_option('selinux'))
+
+ glib_conf.set('HAVE_SELINUX', selinux_dep.found())
+--
+2.27.0
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-6750.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-6750.patch
new file mode 100644
index 0000000000..6db3934978
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2020-6750.patch
@@ -0,0 +1,741 @@
+From 747f2c646f5a86ac58ad59be08036e81388e971d Mon Sep 17 00:00:00 2001
+From: Patrick Griffis <tingping@tingping.se>
+Date: Thu, 23 Jan 2020 19:58:41 -0800
+Subject: [PATCH] Refactor g_socket_client_connect_async()
+
+This is a fairly large refactoring. The highlights are:
+
+- Removing in-progress connections/addresses from GSocketClientAsyncConnectData:
+
+  This caused issues where multiple ConnectionAttempts would step over each other
+  and modify shared state, causing bugs like accidentally bypassing a set proxy.
+
+ Fixes #1871
+ Fixes #1989
+ Fixes #1902
+
+- Cancelling address enumeration on error/completion
+
+- Queuing successful TCP connections and doing application layer work serially:
+
+ This is more in the spirit of Happy Eyeballs but it also greatly simplifies
+ the flow of connection handling so fewer tasks are happening in parallel
+ when they don't need to be.
+
+ The behavior also should more closely match that of g_socket_client_connect().
+
+- Better track the state of address enumeration:
+
+ Previously we were over eager to treat enumeration finishing as an error.
+
+ Fixes #1872
+ See also #1982
+
+- Add more detailed documentation and logging.
+
+Closes #1995
+
+CVE: CVE-2020-6750
+
+Upstream-Status: Backport [ https://gitlab.gnome.org/GNOME/glib.git;
+commit=2722620e3291b930a3a228100d7c0e07b69534e3 ]
+
+Signed-off-by: Haiqing Bai <Haiqing.Bai@windriver.com>
+---
+ gio/gsocketclient.c | 459 ++++++++++++++++++++++++++++----------------
+ 1 file changed, 296 insertions(+), 163 deletions(-)
+
+diff --git a/gio/gsocketclient.c b/gio/gsocketclient.c
+index 81767c0..b1d5f6c 100644
+--- a/gio/gsocketclient.c
++++ b/gio/gsocketclient.c
+@@ -1332,13 +1332,15 @@ typedef struct
+
+ GSocketConnectable *connectable;
+ GSocketAddressEnumerator *enumerator;
+- GProxyAddress *proxy_addr;
+- GSocket *socket;
+- GIOStream *connection;
++ GCancellable *enumeration_cancellable;
+
+ GSList *connection_attempts;
++ GSList *successful_connections;
+ GError *last_error;
+
++ gboolean enumerated_at_least_once;
++ gboolean enumeration_completed;
++ gboolean connection_in_progress;
+ gboolean completed;
+ } GSocketClientAsyncConnectData;
+
+@@ -1350,10 +1352,9 @@ g_socket_client_async_connect_data_free (GSocketClientAsyncConnectData *data)
+ data->task = NULL;
+ g_clear_object (&data->connectable);
+ g_clear_object (&data->enumerator);
+- g_clear_object (&data->proxy_addr);
+- g_clear_object (&data->socket);
+- g_clear_object (&data->connection);
++ g_clear_object (&data->enumeration_cancellable);
+ g_slist_free_full (data->connection_attempts, connection_attempt_unref);
++ g_slist_free_full (data->successful_connections, connection_attempt_unref);
+
+ g_clear_error (&data->last_error);
+
+@@ -1365,6 +1366,7 @@ typedef struct
+ GSocketAddress *address;
+ GSocket *socket;
+ GIOStream *connection;
++ GProxyAddress *proxy_addr;
+ GSocketClientAsyncConnectData *data; /* unowned */
+ GSource *timeout_source;
+ GCancellable *cancellable;
+@@ -1396,6 +1398,7 @@ connection_attempt_unref (gpointer pointer)
+ g_clear_object (&attempt->socket);
+ g_clear_object (&attempt->connection);
+ g_clear_object (&attempt->cancellable);
++ g_clear_object (&attempt->proxy_addr);
+ if (attempt->timeout_source)
+ {
+ g_source_destroy (attempt->timeout_source);
+@@ -1413,37 +1416,59 @@ connection_attempt_remove (ConnectionAttempt *attempt)
+ }
+
+ static void
+-g_socket_client_async_connect_complete (GSocketClientAsyncConnectData *data)
++cancel_all_attempts (GSocketClientAsyncConnectData *data)
+ {
+- g_assert (data->connection);
++ GSList *l;
+
+- if (!G_IS_SOCKET_CONNECTION (data->connection))
++ for (l = data->connection_attempts; l; l = g_slist_next (l))
+ {
+- GSocketConnection *wrapper_connection;
+-
+- wrapper_connection = g_tcp_wrapper_connection_new (data->connection, data->socket);
+- g_object_unref (data->connection);
+- data->connection = (GIOStream *)wrapper_connection;
++ ConnectionAttempt *attempt_entry = l->data;
++ g_cancellable_cancel (attempt_entry->cancellable);
++ connection_attempt_unref (attempt_entry);
+ }
++ g_slist_free (data->connection_attempts);
++ data->connection_attempts = NULL;
+
+- if (!data->completed)
++ g_slist_free_full (data->successful_connections, connection_attempt_unref);
++ data->successful_connections = NULL;
++
++ g_cancellable_cancel (data->enumeration_cancellable);
++}
++
++static void
++g_socket_client_async_connect_complete (ConnectionAttempt *attempt)
++{
++ GSocketClientAsyncConnectData *data = attempt->data;
++ GError *error = NULL;
++ g_assert (attempt->connection);
++ g_assert (!data->completed);
++
++ if (!G_IS_SOCKET_CONNECTION (attempt->connection))
+ {
+- GError *error = NULL;
++ GSocketConnection *wrapper_connection;
+
+- if (g_cancellable_set_error_if_cancelled (g_task_get_cancellable (data->task), &error))
+- {
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, NULL);
+- g_task_return_error (data->task, g_steal_pointer (&error));
+- }
+- else
+- {
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, data->connection);
+- g_task_return_pointer (data->task, g_steal_pointer (&data->connection), g_object_unref);
+- }
++ wrapper_connection = g_tcp_wrapper_connection_new (attempt->connection, attempt->socket);
++ g_object_unref (attempt->connection);
++ attempt->connection = (GIOStream *)wrapper_connection;
++ }
+
+- data->completed = TRUE;
++ data->completed = TRUE;
++ cancel_all_attempts (data);
++
++ if (g_cancellable_set_error_if_cancelled (g_task_get_cancellable (data->task), &error))
++ {
++ g_debug ("GSocketClient: Connection cancelled!");
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, NULL);
++ g_task_return_error (data->task, g_steal_pointer (&error));
++ }
++ else
++ {
++ g_debug ("GSocketClient: Connection successful!");
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, attempt->connection);
++ g_task_return_pointer (data->task, g_steal_pointer (&attempt->connection), g_object_unref);
+ }
+
++ connection_attempt_unref (attempt);
+ g_object_unref (data->task);
+ }
+
+@@ -1465,59 +1490,63 @@ static void
+ enumerator_next_async (GSocketClientAsyncConnectData *data,
+ gboolean add_task_ref)
+ {
+- /* We need to cleanup the state */
+- g_clear_object (&data->socket);
+- g_clear_object (&data->proxy_addr);
+- g_clear_object (&data->connection);
+-
+ /* Each enumeration takes a ref. This arg just avoids repeated unrefs when
+ an enumeration starts another enumeration */
+ if (add_task_ref)
+ g_object_ref (data->task);
+
+ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_RESOLVING, data->connectable, NULL);
++ g_debug ("GSocketClient: Starting new address enumeration");
+ g_socket_address_enumerator_next_async (data->enumerator,
+- g_task_get_cancellable (data->task),
++ data->enumeration_cancellable,
+ g_socket_client_enumerator_callback,
+ data);
+ }
+
++static void try_next_connection_or_finish (GSocketClientAsyncConnectData *, gboolean);
++
+ static void
+ g_socket_client_tls_handshake_callback (GObject *object,
+ GAsyncResult *result,
+ gpointer user_data)
+ {
+- GSocketClientAsyncConnectData *data = user_data;
++ ConnectionAttempt *attempt = user_data;
++ GSocketClientAsyncConnectData *data = attempt->data;
+
+ if (g_tls_connection_handshake_finish (G_TLS_CONNECTION (object),
+ result,
+ &data->last_error))
+ {
+- g_object_unref (data->connection);
+- data->connection = G_IO_STREAM (object);
++ g_object_unref (attempt->connection);
++ attempt->connection = G_IO_STREAM (object);
+
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_TLS_HANDSHAKED, data->connectable, data->connection);
+- g_socket_client_async_connect_complete (data);
++ g_debug ("GSocketClient: TLS handshake succeeded");
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_TLS_HANDSHAKED, data->connectable, attempt->connection);
++ g_socket_client_async_connect_complete (attempt);
+ }
+ else
+ {
+ g_object_unref (object);
+- enumerator_next_async (data, FALSE);
++ connection_attempt_unref (attempt);
++ g_debug ("GSocketClient: TLS handshake failed: %s", data->last_error->message);
++ try_next_connection_or_finish (data, TRUE);
+ }
+ }
+
+ static void
+-g_socket_client_tls_handshake (GSocketClientAsyncConnectData *data)
++g_socket_client_tls_handshake (ConnectionAttempt *attempt)
+ {
++ GSocketClientAsyncConnectData *data = attempt->data;
+ GIOStream *tlsconn;
+
+ if (!data->client->priv->tls)
+ {
+- g_socket_client_async_connect_complete (data);
++ g_socket_client_async_connect_complete (attempt);
+ return;
+ }
+
+- tlsconn = g_tls_client_connection_new (data->connection,
++ g_debug ("GSocketClient: Starting TLS handshake");
++ tlsconn = g_tls_client_connection_new (attempt->connection,
+ data->connectable,
+ &data->last_error);
+ if (tlsconn)
+@@ -1529,11 +1558,12 @@ g_socket_client_tls_handshake (GSocketClientAsyncConnectData *data)
+ G_PRIORITY_DEFAULT,
+ g_task_get_cancellable (data->task),
+ g_socket_client_tls_handshake_callback,
+- data);
++ attempt);
+ }
+ else
+ {
+- enumerator_next_async (data, FALSE);
++ connection_attempt_unref (attempt);
++ try_next_connection_or_finish (data, TRUE);
+ }
+ }
+
+@@ -1542,23 +1572,38 @@ g_socket_client_proxy_connect_callback (GObject *object,
+ GAsyncResult *result,
+ gpointer user_data)
+ {
+- GSocketClientAsyncConnectData *data = user_data;
++ ConnectionAttempt *attempt = user_data;
++ GSocketClientAsyncConnectData *data = attempt->data;
+
+- g_object_unref (data->connection);
+- data->connection = g_proxy_connect_finish (G_PROXY (object),
+- result,
+- &data->last_error);
+- if (data->connection)
++ g_object_unref (attempt->connection);
++ attempt->connection = g_proxy_connect_finish (G_PROXY (object),
++ result,
++ &data->last_error);
++ if (attempt->connection)
+ {
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_PROXY_NEGOTIATED, data->connectable, data->connection);
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_PROXY_NEGOTIATED, data->connectable, attempt->connection);
+ }
+ else
+ {
+- enumerator_next_async (data, FALSE);
++ connection_attempt_unref (attempt);
++ try_next_connection_or_finish (data, TRUE);
+ return;
+ }
+
+- g_socket_client_tls_handshake (data);
++ g_socket_client_tls_handshake (attempt);
++}
++
++static void
++complete_connection_with_error (GSocketClientAsyncConnectData *data,
++ GError *error)
++{
++ g_debug ("GSocketClient: Connection failed: %s", error->message);
++ g_assert (!data->completed);
++
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, NULL);
++ data->completed = TRUE;
++ cancel_all_attempts (data);
++ g_task_return_error (data->task, error);
+ }
+
+ static gboolean
+@@ -1572,15 +1617,114 @@ task_completed_or_cancelled (GSocketClientAsyncConnectData *data)
+ return TRUE;
+ else if (g_cancellable_set_error_if_cancelled (cancellable, &error))
+ {
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, NULL);
+- g_task_return_error (task, g_steal_pointer (&error));
+- data->completed = TRUE;
++ complete_connection_with_error (data, g_steal_pointer (&error));
+ return TRUE;
+ }
+ else
+ return FALSE;
+ }
+
++static gboolean
++try_next_successful_connection (GSocketClientAsyncConnectData *data)
++{
++ ConnectionAttempt *attempt;
++ const gchar *protocol;
++ GProxy *proxy;
++
++ if (data->connection_in_progress)
++ return FALSE;
++
++ g_assert (data->successful_connections != NULL);
++ attempt = data->successful_connections->data;
++ g_assert (attempt != NULL);
++ data->successful_connections = g_slist_remove (data->successful_connections, attempt);
++ data->connection_in_progress = TRUE;
++
++ g_debug ("GSocketClient: Starting application layer connection");
++
++ if (!attempt->proxy_addr)
++ {
++ g_socket_client_tls_handshake (g_steal_pointer (&attempt));
++ return TRUE;
++ }
++
++ protocol = g_proxy_address_get_protocol (attempt->proxy_addr);
++
++ /* The connection should not be anything other than TCP,
++ * but let's put a safety guard in case
++ */
++ if (!G_IS_TCP_CONNECTION (attempt->connection))
++ {
++ g_critical ("Trying to proxy over non-TCP connection, this is "
++ "most likely a bug in GLib IO library.");
++
++ g_set_error_literal (&data->last_error,
++ G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
++ _("Proxying over a non-TCP connection is not supported."));
++ }
++ else if (g_hash_table_contains (data->client->priv->app_proxies, protocol))
++ {
++ /* Simply complete the connection, we don't want to do TLS handshake
++ * as the application proxy handling may need proxy handshake first */
++ g_socket_client_async_connect_complete (g_steal_pointer (&attempt));
++ return TRUE;
++ }
++ else if ((proxy = g_proxy_get_default_for_protocol (protocol)))
++ {
++ GIOStream *connection = attempt->connection;
++ GProxyAddress *proxy_addr = attempt->proxy_addr;
++
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_PROXY_NEGOTIATING, data->connectable, attempt->connection);
++ g_debug ("GSocketClient: Starting proxy connection");
++ g_proxy_connect_async (proxy,
++ connection,
++ proxy_addr,
++ g_task_get_cancellable (data->task),
++ g_socket_client_proxy_connect_callback,
++ g_steal_pointer (&attempt));
++ g_object_unref (proxy);
++ return TRUE;
++ }
++ else
++ {
++ g_clear_error (&data->last_error);
++
++ g_set_error (&data->last_error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
++ _("Proxy protocol “%s” is not supported."),
++ protocol);
++ }
++
++ data->connection_in_progress = FALSE;
++ g_clear_pointer (&attempt, connection_attempt_unref);
++ return FALSE; /* All non-return paths are failures */
++}
++
++static void
++try_next_connection_or_finish (GSocketClientAsyncConnectData *data,
++ gboolean end_current_connection)
++{
++ if (end_current_connection)
++ data->connection_in_progress = FALSE;
++
++ if (data->connection_in_progress)
++ return;
++
++ /* Keep trying successful connections until one works, each iteration pops one */
++ while (data->successful_connections)
++ {
++ if (try_next_successful_connection (data))
++ return;
++ }
++
++ if (!data->enumeration_completed)
++ {
++ enumerator_next_async (data, FALSE);
++ return;
++ }
++
++ complete_connection_with_error (data, data->last_error);
++}
++
+ static void
+ g_socket_client_connected_callback (GObject *source,
+ GAsyncResult *result,
+@@ -1588,10 +1732,7 @@ g_socket_client_connected_callback (GObject *source,
+ {
+ ConnectionAttempt *attempt = user_data;
+ GSocketClientAsyncConnectData *data = attempt->data;
+- GSList *l;
+ GError *error = NULL;
+- GProxy *proxy;
+- const gchar *protocol;
+
+ if (task_completed_or_cancelled (data) || g_cancellable_is_cancelled (attempt->cancellable))
+ {
+@@ -1613,11 +1754,12 @@ g_socket_client_connected_callback (GObject *source,
+ {
+ clarify_connect_error (error, data->connectable, attempt->address);
+ set_last_error (data, error);
++ g_debug ("GSocketClient: Connection attempt failed: %s", error->message);
+ connection_attempt_remove (attempt);
+- enumerator_next_async (data, FALSE);
+ connection_attempt_unref (attempt);
++ try_next_connection_or_finish (data, FALSE);
+ }
+- else
++ else /* Silently ignore cancelled attempts */
+ {
+ g_clear_error (&error);
+ g_object_unref (data->task);
+@@ -1627,74 +1769,21 @@ g_socket_client_connected_callback (GObject *source,
+ return;
+ }
+
+- data->socket = g_steal_pointer (&attempt->socket);
+- data->connection = g_steal_pointer (&attempt->connection);
+-
+- for (l = data->connection_attempts; l; l = g_slist_next (l))
+- {
+- ConnectionAttempt *attempt_entry = l->data;
+- g_cancellable_cancel (attempt_entry->cancellable);
+- connection_attempt_unref (attempt_entry);
+- }
+- g_slist_free (data->connection_attempts);
+- data->connection_attempts = NULL;
+- connection_attempt_unref (attempt);
+-
+- g_socket_connection_set_cached_remote_address ((GSocketConnection*)data->connection, NULL);
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_CONNECTED, data->connectable, data->connection);
++ g_socket_connection_set_cached_remote_address ((GSocketConnection*)attempt->connection, NULL);
++ g_debug ("GSocketClient: TCP connection successful");
++ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_CONNECTED, data->connectable, attempt->connection);
+
+ /* wrong, but backward compatible */
+- g_socket_set_blocking (data->socket, TRUE);
++ g_socket_set_blocking (attempt->socket, TRUE);
+
+- if (!data->proxy_addr)
+- {
+- g_socket_client_tls_handshake (data);
+- return;
+- }
+-
+- protocol = g_proxy_address_get_protocol (data->proxy_addr);
+-
+- /* The connection should not be anything other than TCP,
+- * but let's put a safety guard in case
++ /* This ends the parallel "happy eyeballs" portion of connecting.
++ Now that we have a successful tcp connection we will attempt to connect
++ at the TLS/Proxy layer. If those layers fail we will move on to the next
++ connection.
+ */
+- if (!G_IS_TCP_CONNECTION (data->connection))
+- {
+- g_critical ("Trying to proxy over non-TCP connection, this is "
+- "most likely a bug in GLib IO library.");
+-
+- g_set_error_literal (&data->last_error,
+- G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
+- _("Proxying over a non-TCP connection is not supported."));
+-
+- enumerator_next_async (data, FALSE);
+- }
+- else if (g_hash_table_contains (data->client->priv->app_proxies, protocol))
+- {
+- /* Simply complete the connection, we don't want to do TLS handshake
+- * as the application proxy handling may need proxy handshake first */
+- g_socket_client_async_connect_complete (data);
+- }
+- else if ((proxy = g_proxy_get_default_for_protocol (protocol)))
+- {
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_PROXY_NEGOTIATING, data->connectable, data->connection);
+- g_proxy_connect_async (proxy,
+- data->connection,
+- data->proxy_addr,
+- g_task_get_cancellable (data->task),
+- g_socket_client_proxy_connect_callback,
+- data);
+- g_object_unref (proxy);
+- }
+- else
+- {
+- g_clear_error (&data->last_error);
+-
+- g_set_error (&data->last_error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
+- _("Proxy protocol “%s” is not supported."),
+- protocol);
+-
+- enumerator_next_async (data, FALSE);
+- }
++ connection_attempt_remove (attempt);
++ data->successful_connections = g_slist_append (data->successful_connections, g_steal_pointer (&attempt));
++ try_next_connection_or_finish (data, FALSE);
+ }
+
+ static gboolean
+@@ -1702,7 +1791,11 @@ on_connection_attempt_timeout (gpointer data)
+ {
+ ConnectionAttempt *attempt = data;
+
+- enumerator_next_async (attempt->data, TRUE);
++ if (!attempt->data->enumeration_completed)
++ {
++ g_debug ("GSocketClient: Timeout reached, trying another enumeration");
++ enumerator_next_async (attempt->data, TRUE);
++ }
+
+ g_clear_pointer (&attempt->timeout_source, g_source_unref);
+ return G_SOURCE_REMOVE;
+@@ -1712,9 +1805,9 @@ static void
+ on_connection_cancelled (GCancellable *cancellable,
+ gpointer data)
+ {
+- GCancellable *attempt_cancellable = data;
++ GCancellable *linked_cancellable = G_CANCELLABLE (data);
+
+- g_cancellable_cancel (attempt_cancellable);
++ g_cancellable_cancel (linked_cancellable);
+ }
+
+ static void
+@@ -1738,39 +1831,49 @@ g_socket_client_enumerator_callback (GObject *object,
+ result, &error);
+ if (address == NULL)
+ {
+- if (data->connection_attempts)
++ if (G_UNLIKELY (data->enumeration_completed))
++ return;
++
++ data->enumeration_completed = TRUE;
++ g_debug ("GSocketClient: Address enumeration completed (out of addresses)");
++
++ /* As per API docs: We only care about error if its the first call,
++ after that the enumerator is done.
++
++ Note that we don't care about cancellation errors because
++ task_completed_or_cancelled() above should handle that.
++
++ If this fails and nothing is in progress then we will complete task here.
++ */
++ if ((data->enumerated_at_least_once && !data->connection_attempts && !data->connection_in_progress) ||
++ !data->enumerated_at_least_once)
+ {
+- g_object_unref (data->task);
+- return;
++ g_debug ("GSocketClient: Address enumeration failed: %s", error ? error->message : NULL);
++ if (data->last_error)
++ {
++ g_clear_error (&error);
++ error = data->last_error;
++ data->last_error = NULL;
++ }
++ else if (!error)
++ {
++ g_set_error_literal (&error, G_IO_ERROR, G_IO_ERROR_FAILED,
++ _("Unknown error on connect"));
++ }
++
++ complete_connection_with_error (data, error);
+ }
+
+- g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_COMPLETE, data->connectable, NULL);
+- data->completed = TRUE;
+- if (!error)
+- {
+- if (data->last_error)
+- {
+- error = data->last_error;
+- data->last_error = NULL;
+- }
+- else
+- {
+- g_set_error_literal (&error, G_IO_ERROR, G_IO_ERROR_FAILED,
+- _("Unknown error on connect"));
+- }
+- }
+- g_task_return_error (data->task, error);
++ /* Enumeration should never trigger again, drop our ref */
+ g_object_unref (data->task);
+ return;
+ }
+
++ data->enumerated_at_least_once = TRUE;
++ g_debug ("GSocketClient: Address enumeration succeeded");
+ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_RESOLVED,
+ data->connectable, NULL);
+
+- if (G_IS_PROXY_ADDRESS (address) &&
+- data->client->priv->enable_proxy)
+- data->proxy_addr = g_object_ref (G_PROXY_ADDRESS (address));
+-
+ g_clear_error (&data->last_error);
+
+ socket = create_socket (data->client, address, &data->last_error);
+@@ -1788,6 +1891,10 @@ g_socket_client_enumerator_callback (GObject *object,
+ attempt->cancellable = g_cancellable_new ();
+ attempt->connection = (GIOStream *)g_socket_connection_factory_create_connection (socket);
+ attempt->timeout_source = g_timeout_source_new (HAPPY_EYEBALLS_CONNECTION_ATTEMPT_TIMEOUT_MS);
++
++ if (G_IS_PROXY_ADDRESS (address) && data->client->priv->enable_proxy)
++ attempt->proxy_addr = g_object_ref (G_PROXY_ADDRESS (address));
++
+ g_source_set_callback (attempt->timeout_source, on_connection_attempt_timeout, attempt, NULL);
+ g_source_attach (attempt->timeout_source, g_main_context_get_thread_default ());
+ data->connection_attempts = g_slist_append (data->connection_attempts, attempt);
+@@ -1797,6 +1904,7 @@ g_socket_client_enumerator_callback (GObject *object,
+ g_object_ref (attempt->cancellable), g_object_unref);
+
+ g_socket_connection_set_cached_remote_address ((GSocketConnection *)attempt->connection, address);
++ g_debug ("GSocketClient: Starting TCP connection attempt");
+ g_socket_client_emit_event (data->client, G_SOCKET_CLIENT_CONNECTING, data->connectable, attempt->connection);
+ g_socket_connection_connect_async (G_SOCKET_CONNECTION (attempt->connection),
+ address,
+@@ -1849,24 +1957,48 @@ g_socket_client_connect_async (GSocketClient *client,
+ else
+ data->enumerator = g_socket_connectable_enumerate (connectable);
+
+- /* The flow and ownership here isn't quite obvious:
+- - The task starts an async attempt to connect.
+- - Each attempt holds a single ref on task.
+- - Each attempt may create new attempts by timing out (not a failure) so
+- there are multiple attempts happening in parallel.
+- - Upon failure an attempt will start a new attempt that steals its ref
+- until there are no more attempts left and it drops its ref.
+- - Upon success it will cancel all other attempts and continue on
+- to the rest of the connection (tls, proxies, etc) which do not
+- happen in parallel and at the very end drop its ref.
+- - Upon cancellation an attempt drops its ref.
+- */
++ /* This function tries to match the behavior of g_socket_client_connect ()
++ which is simple enough but much of it is done in parallel to be as responsive
++ as possible as per Happy Eyeballs (RFC 8305). This complicates flow quite a
++ bit but we can describe it in 3 sections:
++
++ Firstly we have address enumeration (DNS):
++ - This may be triggered multiple times by enumerator_next_async().
++ - It also has its own cancellable (data->enumeration_cancellable).
++ - Enumeration is done lazily because GNetworkAddressAddressEnumerator
++ also does work in parallel and may lazily add new addresses.
++ - If the first enumeration errors then the task errors. Otherwise all enumerations
++ will potentially be used (until task or enumeration is cancelled).
++
++ Then we start attempting connections (TCP):
++ - Each connection is independent and kept in a ConnectionAttempt object.
++ - They each hold a ref on the main task and have their own cancellable.
++ - Multiple attempts may happen in parallel as per Happy Eyeballs.
++ - Upon failure or timeouts more connection attempts are made.
++ - If no connections succeed the task errors.
++ - Upon success they are kept in a list of successful connections.
++
++ Lastly we connect at the application layer (TLS, Proxies):
++ - These are done in serial.
++ - The reasoning here is that Happy Eyeballs is about making bad connections responsive
++ at the IP/TCP layers. Issues at the application layer are generally not due to
++ connectivity issues but rather misconfiguration.
++ - Upon failure it will try the next TCP connection until it runs out and
++ the task errors.
++ - Upon success it cancels everything remaining (enumeration and connections)
++ and returns the connection.
++ */
+
+ data->task = g_task_new (client, cancellable, callback, user_data);
+ g_task_set_check_cancellable (data->task, FALSE); /* We handle this manually */
+ g_task_set_source_tag (data->task, g_socket_client_connect_async);
+ g_task_set_task_data (data->task, data, (GDestroyNotify)g_socket_client_async_connect_data_free);
+
++ data->enumeration_cancellable = g_cancellable_new ();
++ if (cancellable)
++ g_cancellable_connect (cancellable, G_CALLBACK (on_connection_cancelled),
++ g_object_ref (data->enumeration_cancellable), g_object_unref);
++
+ enumerator_next_async (data, FALSE);
+ }
+
+@@ -1985,6 +2117,7 @@ g_socket_client_connect_to_uri_async (GSocketClient *client,
+ }
+ else
+ {
++ g_debug("g_socket_client_connect_to_uri_async");
+ g_socket_client_connect_async (client,
+ connectable, cancellable,
+ callback, user_data);
+--
+2.23.0
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.60.7.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.60.7.bb
index 5aefa6ad8b..af8ded76d5 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.60.7.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.60.7.bb
@@ -16,6 +16,8 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://0001-Do-not-write-bindir-into-pkg-config-files.patch \
file://0001-meson.build-do-not-hardcode-linux-as-the-host-system.patch \
file://0001-meson-do-a-build-time-check-for-strlcpy-before-attem.patch \
+ file://CVE-2020-6750.patch \
+ file://0020-meson.build-do-not-hardcode-linux-as-the-host-system.patch \
"
SRC_URI_append_class-native = " file://relocate-modules.patch"
diff --git a/meta/recipes-core/glibc/glibc-testsuite_2.30.bb b/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
index 657fd4dbc1..d887aeff79 100644
--- a/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
+++ b/meta/recipes-core/glibc/glibc-testsuite_2.30.bb
@@ -1,5 +1,7 @@
require glibc_${PV}.bb
+EXCLUDE_FROM_WORLD = "1"
+
# handle PN differences
FILESEXTRAPATHS_prepend := "${THISDIR}/glibc:"
@@ -58,3 +60,4 @@ addtask do_check after do_compile
inherit nopackages
deltask do_stash_locale
+deltask do_install
diff --git a/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Make-relocatable-install-for-locales.patch b/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Make-relocatable-install-for-locales.patch
index 3aad603ada..5cd235f6ac 100644
--- a/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Make-relocatable-install-for-locales.patch
+++ b/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Make-relocatable-install-for-locales.patch
@@ -65,6 +65,35 @@ index 7c1cc3eecb..53cb8bfc59 100644
/* Load the locale data for CATEGORY from the file specified by *NAME.
If *NAME is "", use environment variables as specified by POSIX, and
---
-2.22.0
-
+Index: git/locale/programs/locale.c
+===================================================================
+--- git.orig/locale/programs/locale.c
++++ git/locale/programs/locale.c
+@@ -632,6 +632,7 @@ nameentcmp (const void *a, const void *b
+ ((const struct nameent *) b)->name);
+ }
+
++static char _write_archive_locales_path[4096] attribute_hidden __attribute__ ((section (".gccrelocprefix"))) = ARCHIVE_NAME;
+
+ static int
+ write_archive_locales (void **all_datap, char *linebuf)
+@@ -645,7 +646,7 @@ write_archive_locales (void **all_datap,
+ int fd, ret = 0;
+ uint32_t cnt;
+
+- fd = open64 (ARCHIVE_NAME, O_RDONLY);
++ fd = open64 (_write_archive_locales_path, O_RDONLY);
+ if (fd < 0)
+ return 0;
+
+@@ -700,8 +701,8 @@ write_archive_locales (void **all_datap,
+ if (cnt)
+ putchar_unlocked ('\n');
+
+- printf ("locale: %-15.15s archive: " ARCHIVE_NAME "\n%s\n",
+- names[cnt].name, linebuf);
++ printf ("locale: %-15.15s archive: %s\n%s\n",
++ names[cnt].name, _write_archive_locales_path, linebuf);
+
+ locrec = (struct locrecent *) (addr + names[cnt].locrec_offset);
+
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-10029.patch b/meta/recipes-core/glibc/glibc/CVE-2020-10029.patch
new file mode 100644
index 0000000000..606b691bcf
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-10029.patch
@@ -0,0 +1,128 @@
+From ce265ec5bc25ec35fba53807abac1b0c8469895e Mon Sep 17 00:00:00 2001
+From: Joseph Myers <joseph@codesourcery.com>
+Date: Wed, 12 Feb 2020 23:31:56 +0000
+Subject: [PATCH] Avoid ldbl-96 stack corruption from range reduction of
+
+ pseudo-zero (bug 25487).
+
+Bug 25487 reports stack corruption in ldbl-96 sinl on a pseudo-zero
+argument (a representation where all the significand bits, including
+the explicit high bit, are zero, but the exponent is not zero, which
+is not a valid representation for the long double type).
+
+Although this is not a valid long double representation, existing
+practice in this area (see bug 4586, originally marked invalid but
+subsequently fixed) is that we still seek to avoid invalid memory
+accesses as a result, in case of programs that treat arbitrary binary
+data as long double representations, although the invalid
+representations of the ldbl-96 format do not need to be consistently
+handled the same as any particular valid representation.
+
+This patch makes the range reduction detect pseudo-zero and unnormal
+representations that would otherwise go to __kernel_rem_pio2, and
+returns a NaN for them instead of continuing with the range reduction
+process. (Pseudo-zero and unnormal representations whose unbiased
+exponent is less than -1 have already been safely returned from the
+function before this point without going through the rest of range
+reduction.) Pseudo-zero representations would previously result in
+the value passed to __kernel_rem_pio2 being all-zero, which is
+definitely unsafe; unnormal representations would previously result in
+a value passed whose high bit is zero, which might well be unsafe
+since that is not a form of input expected by __kernel_rem_pio2.
+
+Tested for x86_64.
+
+CVE: CVE-2020-10029
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=glibc.git;
+a=patch;h=9333498794cde1d5cca518badf79533a24114b6f]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+
+---
+ sysdeps/ieee754/ldbl-96/Makefile | 3 ++-
+ sysdeps/ieee754/ldbl-96/e_rem_pio2l.c | 12 +++++++++
+ sysdeps/ieee754/ldbl-96/test-sinl-pseudo.c | 41 ++++++++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+), 1 deletion(-)
+ create mode 100644 sysdeps/ieee754/ldbl-96/test-sinl-pseudo.c
+
+diff --git a/sysdeps/ieee754/ldbl-96/Makefile b/sysdeps/ieee754/ldbl-96/Makefile
+index b103254..052c1c7 100644
+--- a/sysdeps/ieee754/ldbl-96/Makefile
++++ b/sysdeps/ieee754/ldbl-96/Makefile
+@@ -17,5 +17,6 @@
+ # <http://www.gnu.org/licenses/>.
+
+ ifeq ($(subdir),math)
+-tests += test-canonical-ldbl-96 test-totalorderl-ldbl-96
++tests += test-canonical-ldbl-96 test-totalorderl-ldbl-96 test-sinl-pseudo
++CFLAGS-test-sinl-pseudo.c += -fstack-protector-all
+ endif
+diff --git a/sysdeps/ieee754/ldbl-96/e_rem_pio2l.c b/sysdeps/ieee754/ldbl-96/e_rem_pio2l.c
+index 805de22..1aeccb4 100644
+--- a/sysdeps/ieee754/ldbl-96/e_rem_pio2l.c
++++ b/sysdeps/ieee754/ldbl-96/e_rem_pio2l.c
+@@ -210,6 +210,18 @@ __ieee754_rem_pio2l (long double x, long double *y)
+ return 0;
+ }
+
++ if ((i0 & 0x80000000) == 0)
++ {
++ /* Pseudo-zero and unnormal representations are not valid
++ representations of long double. We need to avoid stack
++ corruption in __kernel_rem_pio2, which expects input in a
++ particular normal form, but those representations do not need
++ to be consistently handled like any particular floating-point
++ value. */
++ y[1] = y[0] = __builtin_nanl ("");
++ return 0;
++ }
++
+ /* Split the 64 bits of the mantissa into three 24-bit integers
+ stored in a double array. */
+ exp = j0 - 23;
+diff --git a/sysdeps/ieee754/ldbl-96/test-sinl-pseudo.c b/sysdeps/ieee754/ldbl-96/test-sinl-pseudo.c
+new file mode 100644
+index 0000000..f59b977
+--- /dev/null
++++ b/sysdeps/ieee754/ldbl-96/test-sinl-pseudo.c
+@@ -0,0 +1,41 @@
++/* Test sinl for pseudo-zeros and unnormals for ldbl-96 (bug 25487).
++ Copyright (C) 2020 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include <math.h>
++#include <math_ldbl.h>
++#include <stdint.h>
++
++static int
++do_test (void)
++{
++ for (int i = 0; i < 64; i++)
++ {
++ uint64_t sig = i == 63 ? 0 : 1ULL << i;
++ long double ld;
++ SET_LDOUBLE_WORDS (ld, 0x4141,
++ sig >> 32, sig & 0xffffffffULL);
++ /* The requirement is that no stack overflow occurs when the
++ pseudo-zero or unnormal goes through range reduction. */
++ volatile long double ldr;
++ ldr = sinl (ld);
++ (void) ldr;
++ }
++ return 0;
++}
++
++#include <support/test-driver.c>
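
The added check keys off the explicit integer bit of the ldbl-96 significand: a non-zero exponent with bit 31 of the high significand word (i0) clear marks a pseudo-zero or unnormal, which must not reach __kernel_rem_pio2. A small Python sketch of that classification, using the same word layout the new test builds with SET_LDOUBLE_WORDS (illustrative only, not glibc code):

    def is_pseudo_zero_or_unnormal(se, i0, i1):
        """se = sign plus 15-bit exponent, i0/i1 = high/low 32 bits of the
        64-bit significand (i1 is not needed for the check itself)."""
        exponent = se & 0x7fff
        # Any valid non-zero ldbl-96 encoding keeps the explicit integer
        # bit set; exponent != 0 with that bit clear is pseudo-zero/unnormal.
        return exponent != 0 and (i0 & 0x80000000) == 0

    # The encodings the new test feeds to sinl():
    assert is_pseudo_zero_or_unnormal(0x4141, 0, 0)            # pseudo-zero
    assert is_pseudo_zero_or_unnormal(0x4141, 0, 1 << 5)       # unnormal
    assert not is_pseudo_zero_or_unnormal(0x4141, 0x80000000, 0)
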
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-1751.patch b/meta/recipes-core/glibc/glibc/CVE-2020-1751.patch
new file mode 100644
index 0000000000..0ed92d50e9
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-1751.patch
@@ -0,0 +1,70 @@
+From d93769405996dfc11d216ddbe415946617b5a494 Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@suse.de>
+Date: Mon, 20 Jan 2020 17:01:50 +0100
+Subject: [PATCH] Fix array overflow in backtrace on PowerPC (bug 25423)
+
+When unwinding through a signal frame the backtrace function on PowerPC
+didn't check array bounds when storing the frame address. Fixes commit
+d400dcac5e ("PowerPC: fix backtrace to handle signal trampolines").
+
+CVE: CVE-2020-1751
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ debug/tst-backtrace5.c | 12 ++++++++++++
+ sysdeps/powerpc/powerpc32/backtrace.c | 2 ++
+ sysdeps/powerpc/powerpc64/backtrace.c | 2 ++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/debug/tst-backtrace5.c b/debug/tst-backtrace5.c
+index e7ce410845..b2f46160e7 100644
+--- a/debug/tst-backtrace5.c
++++ b/debug/tst-backtrace5.c
+@@ -89,6 +89,18 @@ handle_signal (int signum)
+ }
+ /* Symbol names are not available for static functions, so we do not
+ check do_test. */
++
++ /* Check that backtrace does not return more than what fits in the array
++ (bug 25423). */
++ for (int j = 0; j < NUM_FUNCTIONS; j++)
++ {
++ n = backtrace (addresses, j);
++ if (n > j)
++ {
++ FAIL ();
++ return;
++ }
++ }
+ }
+
+ NO_INLINE int
+diff --git a/sysdeps/powerpc/powerpc32/backtrace.c b/sysdeps/powerpc/powerpc32/backtrace.c
+index 7c2d4726f8..d1456c8ae4 100644
+--- a/sysdeps/powerpc/powerpc32/backtrace.c
++++ b/sysdeps/powerpc/powerpc32/backtrace.c
+@@ -114,6 +114,8 @@ __backtrace (void **array, int size)
+ }
+ if (gregset)
+ {
++ if (count + 1 == size)
++ break;
+ array[++count] = (void*)((*gregset)[PT_NIP]);
+ current = (void*)((*gregset)[PT_R1]);
+ }
+diff --git a/sysdeps/powerpc/powerpc64/backtrace.c b/sysdeps/powerpc/powerpc64/backtrace.c
+index 65c260ab76..8a53a1088f 100644
+--- a/sysdeps/powerpc/powerpc64/backtrace.c
++++ b/sysdeps/powerpc/powerpc64/backtrace.c
+@@ -87,6 +87,8 @@ __backtrace (void **array, int size)
+ if (is_sigtramp_address (current->return_address))
+ {
+ struct signal_frame_64 *sigframe = (struct signal_frame_64*) current;
++ if (count + 1 == size)
++ break;
+ array[++count] = (void*) sigframe->uc.uc_mcontext.gp_regs[PT_NIP];
+ current = (void*) sigframe->uc.uc_mcontext.gp_regs[PT_R1];
+ }
+--
+2.23.0
+
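
Both backtrace.c hunks add the same guard: before storing the extra frame recovered from a signal trampoline, bail out if the output array is already full, so backtrace() can never write more than size entries. A simplified Python model of that bounded store (the real code interleaves ordinary and signal frames; this is only a sketch):

    def collect_frames(frames, size):
        """Store at most `size` return addresses, mirroring array[++count]."""
        array = []
        count = -1
        for frame in frames:
            if count + 1 == size:     # the bounds check added by the patch
                break
            count += 1
            array.append(frame)       # array[++count] = frame
        return array

    print(collect_frames(list(range(10)), size=4))   # -> [0, 1, 2, 3]
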
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-1752.patch b/meta/recipes-core/glibc/glibc/CVE-2020-1752.patch
new file mode 100644
index 0000000000..6c347cd414
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-1752.patch
@@ -0,0 +1,66 @@
+From ddc650e9b3dc916eab417ce9f79e67337b05035c Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@suse.de>
+Date: Wed, 19 Feb 2020 17:21:46 +0100
+Subject: [PATCH] Fix use-after-free in glob when expanding ~user (bug 25414)
+
+The value of `end_name' points into the value of `dirname', thus don't
+deallocate the latter before the last use of the former.
+
+CVE: CVE-2020-1752
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ posix/glob.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/posix/glob.c b/posix/glob.c
+index cba9cd1819..4580cefb9f 100644
+--- a/posix/glob.c
++++ b/posix/glob.c
+@@ -827,31 +827,32 @@ __glob (const char *pattern, int flags, int (*errfunc) (const char *, int),
+ {
+ size_t home_len = strlen (p->pw_dir);
+ size_t rest_len = end_name == NULL ? 0 : strlen (end_name);
+- char *d;
++ char *d, *newp;
++ bool use_alloca = glob_use_alloca (alloca_used,
++ home_len + rest_len + 1);
+
+- if (__glibc_unlikely (malloc_dirname))
+- free (dirname);
+- malloc_dirname = 0;
+-
+- if (glob_use_alloca (alloca_used, home_len + rest_len + 1))
+- dirname = alloca_account (home_len + rest_len + 1,
+- alloca_used);
++ if (use_alloca)
++ newp = alloca_account (home_len + rest_len + 1, alloca_used);
+ else
+ {
+- dirname = malloc (home_len + rest_len + 1);
+- if (dirname == NULL)
++ newp = malloc (home_len + rest_len + 1);
++ if (newp == NULL)
+ {
+ scratch_buffer_free (&pwtmpbuf);
+ retval = GLOB_NOSPACE;
+ goto out;
+ }
+- malloc_dirname = 1;
+ }
+- d = mempcpy (dirname, p->pw_dir, home_len);
++ d = mempcpy (newp, p->pw_dir, home_len);
+ if (end_name != NULL)
+ d = mempcpy (d, end_name, rest_len);
+ *d = '\0';
+
++ if (__glibc_unlikely (malloc_dirname))
++ free (dirname);
++ dirname = newp;
++ malloc_dirname = !use_alloca;
++
+ dirlen = home_len + rest_len;
+ dirname_modified = 1;
+ }
+--
+2.18.2
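
The essence of the fix is ordering: build the ~user expansion into a fresh buffer while end_name (which points into the old dirname) is still valid, and only afterwards free and replace dirname. A Python sketch of that build-then-swap ordering (strings stand in for the malloc/alloca buffers; the function name and offset handling are illustrative):

    def expand_tilde_user(dirname, end_name_offset, home_dir):
        # end_name points into dirname, so dirname must stay alive until
        # both copies into the new buffer are finished.
        end_name = "" if end_name_offset is None else dirname[end_name_offset:]
        newp = home_dir + end_name   # mempcpy(newp, pw_dir) + mempcpy(d, end_name)
        # Only now may the old buffer be released (free(dirname) in glob.c)
        # and the pointer swapped over to the freshly built one.
        dirname = newp
        return dirname

    print(expand_tilde_user("~joe/src", 4, "/home/joe"))   # -> /home/joe/src
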
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch b/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch
new file mode 100644
index 0000000000..01c0328362
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-6096-1.patch
@@ -0,0 +1,193 @@
+From 79a4fa341b8a89cb03f84564fd72abaa1a2db394 Mon Sep 17 00:00:00 2001
+From: Evgeny Eremin <e.eremin@omprussia.ru>
+Date: Wed, 8 Jul 2020 14:18:19 +0200
+Subject: [PATCH 1/2] arm: CVE-2020-6096: fix memcpy and memmove for negative
+ length [BZ #25620]
+
+Unsigned branch instructions could be used for r2 to fix the wrong
+behavior when a negative length is passed to memcpy and memmove.
+This commit fixes the generic arm implementation of memcpy and memmove.
+
+CVE: CVE-2020-6096
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ sysdeps/arm/memcpy.S | 24 ++++++++++--------------
+ sysdeps/arm/memmove.S | 24 ++++++++++--------------
+ 2 files changed, 20 insertions(+), 28 deletions(-)
+
+diff --git a/sysdeps/arm/memcpy.S b/sysdeps/arm/memcpy.S
+index 510e8adaf2..bcfbc51d99 100644
+--- a/sysdeps/arm/memcpy.S
++++ b/sysdeps/arm/memcpy.S
+@@ -68,7 +68,7 @@ ENTRY(memcpy)
+ cfi_remember_state
+
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #0] )
+ bne 9f
+@@ -82,7 +82,7 @@ ENTRY(memcpy)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb r3, ip, #32 )
+@@ -98,9 +98,9 @@ ENTRY(memcpy)
+ #endif
+
+ PLD( pld [r1, #0] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -108,9 +108,7 @@ ENTRY(memcpy)
+ 4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -222,7 +220,7 @@ ENTRY(memcpy)
+ strbge r4, [r0], #1
+ subs r2, r2, ip
+ strb lr, [r0], #1
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -236,7 +234,7 @@ ENTRY(memcpy)
+ .macro forward_copy_shift pull push
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -253,9 +251,9 @@ ENTRY(memcpy)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #0] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -280,9 +278,7 @@ ENTRY(memcpy)
+ mov ip, ip, PULL #\pull
+ orr ip, ip, lr, PUSH #\push
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+diff --git a/sysdeps/arm/memmove.S b/sysdeps/arm/memmove.S
+index 954037ef3a..0d07b76ee6 100644
+--- a/sysdeps/arm/memmove.S
++++ b/sysdeps/arm/memmove.S
+@@ -85,7 +85,7 @@ ENTRY(memmove)
+ add r1, r1, r2
+ add r0, r0, r2
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #-4] )
+ bne 9f
+@@ -99,7 +99,7 @@ ENTRY(memmove)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
+@@ -114,9 +114,9 @@ ENTRY(memmove)
+ #endif
+
+ PLD( pld [r1, #-4] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -124,9 +124,7 @@ ENTRY(memmove)
+ 4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -237,7 +235,7 @@ ENTRY(memmove)
+ strbge r4, [r0, #-1]!
+ subs r2, r2, ip
+ strb lr, [r0, #-1]!
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -251,7 +249,7 @@ ENTRY(memmove)
+ .macro backward_copy_shift push pull
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -268,9 +266,9 @@ ENTRY(memmove)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #-4] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -295,9 +293,7 @@ ENTRY(memmove)
+ mov r4, r4, PUSH #\push
+ orr r4, r4, r3, PULL #\pull
+ stmdb r0!, {r4 - r8, r10, ip, lr}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+--
+2.17.0
+
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch b/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch
new file mode 100644
index 0000000000..bfb2d7e7f5
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch
@@ -0,0 +1,111 @@
+From beea361050728138b82c57dda0c4810402d342b9 Mon Sep 17 00:00:00 2001
+From: Alexander Anisimov <a.anisimov@omprussia.ru>
+Date: Wed, 8 Jul 2020 14:18:31 +0200
+Subject: [PATCH 2/2] arm: CVE-2020-6096: Fix multiarch memcpy for negative
+ length [BZ #25620]
+
+Unsigned branch instructions could be used for r2 to fix the wrong
+behavior when a negative length is passed to memcpy.
+This commit fixes the armv7 version.
+
+CVE: CVE-2020-6096
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+index bf4ac7077f..379bb56fc9 100644
+--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
++++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+@@ -268,7 +268,7 @@ ENTRY(memcpy)
+
+ mov dst, dstin /* Preserve dstin, we need to return it. */
+ cmp count, #64
+- bge .Lcpy_not_short
++ bhs .Lcpy_not_short
+ /* Deal with small copies quickly by dropping straight into the
+ exit block. */
+
+@@ -351,10 +351,10 @@ ENTRY(memcpy)
+
+ 1:
+ subs tmp2, count, #64 /* Use tmp2 for count. */
+- blt .Ltail63aligned
++ blo .Ltail63aligned
+
+ cmp tmp2, #512
+- bge .Lcpy_body_long
++ bhs .Lcpy_body_long
+
+ .Lcpy_body_medium: /* Count in tmp2. */
+ #ifdef USE_VFP
+@@ -378,7 +378,7 @@ ENTRY(memcpy)
+ add src, src, #64
+ vstr d1, [dst, #56]
+ add dst, dst, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ beq .Ldone
+
+@@ -412,7 +412,7 @@ ENTRY(memcpy)
+ ldrd A_l, A_h, [src, #64]!
+ strd A_l, A_h, [dst, #64]!
+ subs tmp2, tmp2, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ bne 1f
+ ldr tmp2,[sp], #FRAME_SIZE
+@@ -482,7 +482,7 @@ ENTRY(memcpy)
+ add src, src, #32
+
+ subs tmp2, tmp2, #prefetch_lines * 64 * 2
+- blt 2f
++ blo 2f
+ 1:
+ cpy_line_vfp d3, 0
+ cpy_line_vfp d4, 64
+@@ -494,7 +494,7 @@ ENTRY(memcpy)
+ add dst, dst, #2 * 64
+ add src, src, #2 * 64
+ subs tmp2, tmp2, #prefetch_lines * 64
+- bge 1b
++ bhs 1b
+
+ 2:
+ cpy_tail_vfp d3, 0
+@@ -615,8 +615,8 @@ ENTRY(memcpy)
+ 1:
+ pld [src, #(3 * 64)]
+ subs count, count, #64
+- ldrmi tmp2, [sp], #FRAME_SIZE
+- bmi .Ltail63unaligned
++ ldrlo tmp2, [sp], #FRAME_SIZE
++ blo .Ltail63unaligned
+ pld [src, #(4 * 64)]
+
+ #ifdef USE_NEON
+@@ -633,7 +633,7 @@ ENTRY(memcpy)
+ neon_load_multi d0-d3, src
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bmi 2f
++ blo 2f
+ 1:
+ pld [src, #(4 * 64)]
+ neon_store_multi d0-d3, dst
+@@ -641,7 +641,7 @@ ENTRY(memcpy)
+ neon_store_multi d4-d7, dst
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bpl 1b
++ bhs 1b
+ 2:
+ neon_store_multi d0-d3, dst
+ neon_store_multi d4-d7, dst
+--
+2.17.0
+
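
Both halves of the CVE-2020-6096 fix swap signed condition codes (blt, bge, bmi, bpl) for their unsigned counterparts (blo, bhs) because the count register holds a size_t: a "negative" length is really a huge unsigned value, and a signed comparison lets it slip into the short-copy paths. A small Python model of the two readings of the same 32-bit register value (the actual flag mechanics are simplified away):

    def signed32(n):
        """Interpret a 32-bit register value as a signed integer."""
        n &= 0xffffffff
        return n - 0x100000000 if n & 0x80000000 else n

    length = 0xffffffff           # what a caller's "-1" looks like in r2/count

    # blt-style (signed) reading of "length < 4": wrongly takes the small-copy exit
    print(signed32(length) < 4)   # True
    # blo-style (unsigned) reading: the length is treated as the huge value it is
    print(length < 4)             # False
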
diff --git a/meta/recipes-core/glibc/glibc_2.30.bb b/meta/recipes-core/glibc/glibc_2.30.bb
index 7913bc2812..b674b02706 100644
--- a/meta/recipes-core/glibc/glibc_2.30.bb
+++ b/meta/recipes-core/glibc/glibc_2.30.bb
@@ -42,6 +42,11 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0027-inject-file-assembly-directives.patch \
file://0028-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch \
file://CVE-2019-19126.patch \
+ file://CVE-2020-10029.patch \
+ file://CVE-2020-1751.patch \
+ file://CVE-2020-1752.patch \
+ file://CVE-2020-6096-1.patch \
+ file://CVE-2020-6096-2.patch \
"
S = "${WORKDIR}/git"
B = "${WORKDIR}/build-${TARGET_SYS}"
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index 6c9049f9ff..e993bde2d7 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -24,7 +24,7 @@ IMAGE_FSTYPES = "wic.vmdk"
inherit core-image module-base setuptools3
-SRCREV ?= "cf0cefd53c5d4f72e26c74571a10e098996a1ff2"
+SRCREV ?= "f4b1c01110bf6cf7691aa6f214cecd89a52d5661"
SRC_URI = "git://git.yoctoproject.org/poky;branch=zeus \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
diff --git a/meta/recipes-core/kbd/kbd/0001-configure.ac-Fix-logic-of-vlock-configure-switch.patch b/meta/recipes-core/kbd/kbd/0001-configure.ac-Fix-logic-of-vlock-configure-switch.patch
new file mode 100644
index 0000000000..c3f068f61b
--- /dev/null
+++ b/meta/recipes-core/kbd/kbd/0001-configure.ac-Fix-logic-of-vlock-configure-switch.patch
@@ -0,0 +1,31 @@
+From f7f357ef079b6d185f340e716d7c72a98d82bad0 Mon Sep 17 00:00:00 2001
+From: Garry Filakhtov <filakhtov@gmail.com>
+Date: Fri, 20 Jul 2018 15:58:56 +0200
+Subject: [PATCH] configure.ac: Fix logic of vlock configure switch
+
+Downstream bug report: https://bugs.gentoo.org/661650
+
+Upstream-Status: Backport [f7f357ef079b6d185f340e716d7c72a98d82bad0]
+
+Signed-off-by: Lars Wendler <polynomial-c@gentoo.org>
+Signed-off-by: De Huo <de.huo@windriver.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index 87eb63c..07098cf 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -119,7 +119,7 @@ AM_CONDITIONAL(BUILD_LIBKEYMAP, test "$BUILD_LIBKEYMAP" = "yes")
+
+ AC_ARG_ENABLE(vlock,
+ AS_HELP_STRING(--disable-vlock, [do not build vlock]),
+- [VLOCK_PROG=no],[VLOCK_PROG=yes])
++ [VLOCK_PROG=$enableval],[VLOCK_PROG=yes])
+ AM_CONDITIONAL(VLOCK, test "$VLOCK_PROG" = "yes")
+
+ if test "$VLOCK_PROG" = "yes"; then
+--
+2.23.0
+
diff --git a/meta/recipes-core/kbd/kbd_2.0.4.bb b/meta/recipes-core/kbd/kbd_2.0.4.bb
index 4af3256fff..47e76da2b4 100644
--- a/meta/recipes-core/kbd/kbd_2.0.4.bb
+++ b/meta/recipes-core/kbd/kbd_2.0.4.bb
@@ -13,6 +13,7 @@ RCONFLICTS_${PN} = "console-tools"
SRC_URI = "${KERNELORG_MIRROR}/linux/utils/${BPN}/${BP}.tar.xz \
file://run-ptest \
${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'file://set-proper-path-of-resources.patch', '', d)} \
+ file://0001-configure.ac-Fix-logic-of-vlock-configure-switch.patch \
"
SRC_URI[md5sum] = "c1635a5a83b63aca7f97a3eab39ebaa6"
@@ -58,7 +59,8 @@ RDEPENDS_${PN}-ptest = "make"
inherit update-alternatives
-ALTERNATIVE_${PN} = "chvt deallocvt fgconsole openvt showkey"
+ALTERNATIVE_${PN} = "chvt deallocvt fgconsole openvt showkey \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'vlock','', d)}"
ALTERNATIVE_PRIORITY = "100"
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch b/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch
new file mode 100644
index 0000000000..4ee2d4fe62
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2019-20388.patch
@@ -0,0 +1,37 @@
+From 7ffcd44d7e6c46704f8af0321d9314cd26e0e18a Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Tue, 20 Aug 2019 16:33:06 +0800
+Subject: [PATCH] Fix memory leak in xmlSchemaValidateStream
+
+When ctxt->schema is NULL, xmlSchemaSAXPlug->xmlSchemaPreRun
+allocates a new schema for ctxt->schema and sets vctxt->xsiAssemble
+to 1. Then xmlSchemaVStart->xmlSchemaPreRun initializes
+vctxt->xsiAssemble to 0 again, which causes the allocated schema
+to never be freed.
+
+Found with libFuzzer.
+
+Upstream-Status: Accepted [https://gitlab.gnome.org/GNOME/libxml2/commit/7ffcd44d7e6c46704f8af0321d9314cd26e0e18a]
+CVE: CVE-2019-20388
+
+Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ xmlschemas.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/xmlschemas.c b/xmlschemas.c
+index 301c8449..39d92182 100644
+--- a/xmlschemas.c
++++ b/xmlschemas.c
+@@ -28090,7 +28090,6 @@ xmlSchemaPreRun(xmlSchemaValidCtxtPtr vctxt) {
+ vctxt->nberrors = 0;
+ vctxt->depth = -1;
+ vctxt->skipDepth = -1;
+- vctxt->xsiAssemble = 0;
+ vctxt->hasKeyrefs = 0;
+ #ifdef ENABLE_IDC_NODE_TABLES_TEST
+ vctxt->createIDCNodeTables = 1;
+--
+2.24.1
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch b/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch
new file mode 100644
index 0000000000..8224346660
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch
@@ -0,0 +1,41 @@
+From 50f06b3efb638efb0abd95dc62dca05ae67882c2 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Aug 2020 21:54:27 +0200
+Subject: [PATCH] Fix out-of-bounds read with 'xmllint --htmlout'
+
+Make sure that truncated UTF-8 sequences don't cause an out-of-bounds
+array access.
+
+Thanks to @SuhwanSong and the Agency for Defense Development (ADD) for
+the report.
+
+Fixes #178.
+
+CVE: CVE-2020-24977
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/50f06b3efb638efb0abd95dc62dca05ae67882c2]
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ xmllint.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/xmllint.c b/xmllint.c
+index f6a8e463..c647486f 100644
+--- a/xmllint.c
++++ b/xmllint.c
+@@ -528,6 +528,12 @@ static void
+ xmlHTMLEncodeSend(void) {
+ char *result;
+
++ /*
++ * xmlEncodeEntitiesReentrant assumes valid UTF-8, but the buffer might
++ * end with a truncated UTF-8 sequence. This is a hack to at least avoid
++ * an out-of-bounds read.
++ */
++ memset(&buffer[sizeof(buffer)-4], 0, 4);
+ result = (char *) xmlEncodeEntitiesReentrant(NULL, BAD_CAST buffer);
+ if (result) {
+ xmlGenericError(xmlGenericErrorContext, "%s", result);
+--
+2.17.1
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch b/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch
new file mode 100644
index 0000000000..facfefd362
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2020-7595.patch
@@ -0,0 +1,36 @@
+From 0e1a49c8907645d2e155f0d89d4d9895ac5112b5 Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Thu, 12 Dec 2019 17:30:55 +0800
+Subject: [PATCH] Fix infinite loop in xmlStringLenDecodeEntities
+
+When ctxt->instate == XML_PARSER_EOF, xmlParseStringEntityRef
+returns NULL, which causes an infinite loop in xmlStringLenDecodeEntities.
+
+Found with libFuzzer.
+
+Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/commit/0e1a49c89076]
+CVE: CVE-2020-7595
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ parser.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index d1c31963..a34bb6cd 100644
+--- a/parser.c
++++ b/parser.c
+@@ -2646,7 +2646,8 @@ xmlStringLenDecodeEntities(xmlParserCtxtPtr ctxt, const xmlChar *str, int len,
+ else
+ c = 0;
+ while ((c != 0) && (c != end) && /* non input consuming loop */
+- (c != end2) && (c != end3)) {
++ (c != end2) && (c != end3) &&
++ (ctxt->instate != XML_PARSER_EOF)) {
+
+ if (c == 0) break;
+ if ((c == '&') && (str[1] == '#')) {
+--
+2.24.1
+
diff --git a/meta/recipes-core/libxml/libxml2/Fix-CVE-2019-19956.patch b/meta/recipes-core/libxml/libxml2/Fix-CVE-2019-19956.patch
new file mode 100644
index 0000000000..1c2dff9d5f
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/Fix-CVE-2019-19956.patch
@@ -0,0 +1,40 @@
+From 5a02583c7e683896d84878bd90641d8d9b0d0549 Mon Sep 17 00:00:00 2001
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+Date: Wed, 7 Aug 2019 17:39:17 +0800
+Subject: [PATCH] Fix memory leak in xmlParseBalancedChunkMemoryRecover
+
+When doc is NULL, the namespace created in xmlTreeEnsureXMLDecl
+is bound to newDoc->oldNs; in this case, setting newDoc->oldNs to
+NULL and freeing newDoc causes a memory leak.
+
+Found with libFuzzer.
+
+Closes #82.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/commit/5a02583c7e683896d84878bd90641d8d9b0d0549]
+CVE: CVE-2019-19956
+
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ parser.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index 1ce1ccf1..26d9f4e3 100644
+--- a/parser.c
++++ b/parser.c
+@@ -13894,7 +13894,8 @@ xmlParseBalancedChunkMemoryRecover(xmlDocPtr doc, xmlSAXHandlerPtr sax,
+ xmlFreeParserCtxt(ctxt);
+ newDoc->intSubset = NULL;
+ newDoc->extSubset = NULL;
+- newDoc->oldNs = NULL;
++ if(doc != NULL)
++ newDoc->oldNs = NULL;
+ xmlFreeDoc(newDoc);
+
+ return(ret);
+--
+2.24.1
+
+
diff --git a/meta/recipes-core/libxml/libxml2_2.9.9.bb b/meta/recipes-core/libxml/libxml2_2.9.9.bb
index c38f883e44..ff496ccfaf 100644
--- a/meta/recipes-core/libxml/libxml2_2.9.9.bb
+++ b/meta/recipes-core/libxml/libxml2_2.9.9.bb
@@ -20,6 +20,10 @@ SRC_URI = "http://www.xmlsoft.org/sources/libxml2-${PV}.tar.gz;name=libtar \
file://libxml-m4-use-pkgconfig.patch \
file://0001-Make-ptest-run-the-python-tests-if-python-is-enabled.patch \
file://fix-execution-of-ptests.patch \
+ file://Fix-CVE-2019-19956.patch \
+ file://CVE-2020-7595.patch \
+ file://CVE-2019-20388.patch \
+ file://CVE-2020-24977.patch \
"
SRC_URI[libtar.md5sum] = "c04a5a0a042eaa157e8e8c9eabe76bd6"
diff --git a/meta/recipes-core/meta/buildtools-extended-tarball.bb b/meta/recipes-core/meta/buildtools-extended-tarball.bb
new file mode 100644
index 0000000000..94ed57585b
--- /dev/null
+++ b/meta/recipes-core/meta/buildtools-extended-tarball.bb
@@ -0,0 +1,36 @@
+require recipes-core/meta/buildtools-tarball.bb
+
+DESCRIPTION = "SDK type target for building a standalone tarball containing build-essentials, python3, chrpath, \
+ make, git and tar. The tarball can be used to run bitbake builds on systems which don't meet the \
+ usual version requirements and have ancient compilers."
+SUMMARY = "Standalone tarball for running builds on systems with inadequate software and ancient compilers"
+LICENSE = "MIT"
+
+# Add nativesdk equivalent of build-essentials
+TOOLCHAIN_HOST_TASK += "\
+ nativesdk-automake \
+ nativesdk-autoconf \
+ nativesdk-binutils \
+ nativesdk-binutils-symlinks \
+ nativesdk-cpp \
+ nativesdk-cpp-symlinks \
+ nativesdk-gcc \
+ nativesdk-gcc-symlinks \
+ nativesdk-g++ \
+ nativesdk-g++-symlinks \
+ nativesdk-gettext \
+ nativesdk-libatomic \
+ nativesdk-libgcc \
+ nativesdk-libstdc++ \
+ nativesdk-libstdc++-dev \
+ nativesdk-libstdc++-staticdev \
+ nativesdk-libtool \
+ nativesdk-pkgconfig \
+ nativesdk-glibc-utils \
+ nativesdk-python \
+ nativesdk-libxcrypt-dev \
+ "
+
+TOOLCHAIN_OUTPUTNAME = "${SDK_ARCH}-buildtools-extended-nativesdk-standalone-${DISTRO_VERSION}"
+
+SDK_TITLE = "Extended Build tools"
diff --git a/meta/recipes-core/meta/buildtools-tarball.bb b/meta/recipes-core/meta/buildtools-tarball.bb
index 91df6f1ae9..ceb60b0e48 100644
--- a/meta/recipes-core/meta/buildtools-tarball.bb
+++ b/meta/recipes-core/meta/buildtools-tarball.bb
@@ -25,6 +25,7 @@ TOOLCHAIN_HOST_TASK ?= "\
nativesdk-texinfo \
nativesdk-libnss-nis \
nativesdk-rpcsvc-proto \
+ nativesdk-patch \
"
MULTIMACH_TARGET_SYS = "${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}"
@@ -72,7 +73,13 @@ create_sdk_files_append () {
toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${SDK_SYS}
echo 'export GIT_SSL_CAINFO="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
+ echo 'export SSL_CERT_FILE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
+ echo 'export OPENSSL_CONF="${SDKPATHNATIVE}${sysconfdir}/ssl/openssl.cnf"' >>$script
+ mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
+ echo '${SDKPATHNATIVE}${libdir}
+${SDKPATHNATIVE}${base_libdir}
+include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
if [ "${SDKMACHINE}" = "i686" ]; then
echo 'export NO32LIBS="0"' >>$script
echo 'echo "$BB_ENV_EXTRAWHITE" | grep -q "NO32LIBS"' >>$script
diff --git a/meta/recipes-core/meta/cve-update-db-native.bb b/meta/recipes-core/meta/cve-update-db-native.bb
index 575254af40..0577a5ccac 100644
--- a/meta/recipes-core/meta/cve-update-db-native.bb
+++ b/meta/recipes-core/meta/cve-update-db-native.bb
@@ -13,8 +13,15 @@ deltask do_install
deltask do_populate_sysroot
python () {
- if not d.getVar("CVE_CHECK_DB_FILE"):
+ cve_check_db_file = d.getVar("CVE_CHECK_DB_FILE")
+ if not cve_check_db_file:
raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
+
+ if os.path.exists("%s-journal" % cve_check_db_file ):
+ os.remove("%s-journal" % cve_check_db_file)
+
+ if os.path.exists(cve_check_db_file):
+ os.remove(cve_check_db_file)
}
python do_populate_cve_db() {
@@ -122,7 +129,7 @@ def parse_node_and_insert(c, node, cveId):
product = cpe23[4]
version = cpe23[5]
- if version != '*':
+ if version != '*' and version != '-':
# Version is defined, this is a '=' match
yield [cveId, vendor, product, version, '=', '', '']
else:
@@ -160,15 +167,20 @@ def update_db(c, jsondata):
if not elt['impact']:
continue
+ accessVector = None
cveId = elt['cve']['CVE_data_meta']['ID']
cveDesc = elt['cve']['description']['description_data'][0]['value']
date = elt['lastModifiedDate']
- accessVector = elt['impact']['baseMetricV2']['cvssV2']['accessVector']
- cvssv2 = elt['impact']['baseMetricV2']['cvssV2']['baseScore']
-
try:
+ accessVector = elt['impact']['baseMetricV2']['cvssV2']['accessVector']
+ cvssv2 = elt['impact']['baseMetricV2']['cvssV2']['baseScore']
+ except KeyError:
+ cvssv2 = 0.0
+ try:
+ accessVector = accessVector or elt['impact']['baseMetricV3']['cvssV3']['attackVector']
cvssv3 = elt['impact']['baseMetricV3']['cvssV3']['baseScore']
- except:
+ except KeyError:
+ accessVector = accessVector or "UNKNOWN"
cvssv3 = 0.0
c.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
diff --git a/meta/recipes-core/meta/dummy-sdk-package.inc b/meta/recipes-core/meta/dummy-sdk-package.inc
index 4d653706b1..0d15a37c35 100644
--- a/meta/recipes-core/meta/dummy-sdk-package.inc
+++ b/meta/recipes-core/meta/dummy-sdk-package.inc
@@ -17,6 +17,9 @@ ALLOW_EMPTY_${PN} = "1"
PR[vardeps] += "DUMMYPROVIDES"
+DUMMYPROVIDES_PACKAGES ??= ""
+DUMMYPROVIDES += "${@' '.join([multilib_pkg_extend(d, pkg) for pkg in d.getVar('DUMMYPROVIDES_PACKAGES').split()])}"
+
python populate_packages_prepend() {
p = d.getVar("PN")
d.appendVar("RPROVIDES_%s" % p, "${DUMMYPROVIDES}")
diff --git a/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb b/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
index 6a8748acdf..cfa41c4ae6 100644
--- a/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
+++ b/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
@@ -1,6 +1,6 @@
DUMMYARCH = "buildtools-dummy-${SDKPKGSUFFIX}"
-DUMMYPROVIDES = "\
+DUMMYPROVIDES_PACKAGES = "\
nativesdk-perl \
nativesdk-libxml-parser-perl \
nativesdk-perl-module-bytes \
@@ -15,12 +15,18 @@ DUMMYPROVIDES = "\
nativesdk-perl-module-file-find \
nativesdk-perl-module-file-glob \
nativesdk-perl-module-file-path \
+ nativesdk-perl-module-file-spec \
nativesdk-perl-module-file-stat \
nativesdk-perl-module-getopt-long \
nativesdk-perl-module-io-file \
+ nativesdk-perl-module-overloading \
nativesdk-perl-module-posix \
nativesdk-perl-module-thread-queue \
nativesdk-perl-module-threads \
+ nativesdk-perl-module-warnings \
+"
+
+DUMMYPROVIDES = "\
/usr/bin/perl \
"
diff --git a/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb b/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb
index b891efa5ef..29f4dd3633 100644
--- a/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb
+++ b/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb
@@ -1,10 +1,13 @@
DUMMYARCH = "sdk-provides-dummy-${SDKPKGSUFFIX}"
+DUMMYPROVIDES_PACKAGES = "\
+ pkgconfig \
+"
+
# Add /bin/sh?
DUMMYPROVIDES = "\
/bin/bash \
/usr/bin/env \
- pkgconfig \
libGL.so()(64bit) \
libGL.so \
"
diff --git a/meta/recipes-core/meta/target-sdk-provides-dummy.bb b/meta/recipes-core/meta/target-sdk-provides-dummy.bb
index 87b8bfab9c..e3beeb796c 100644
--- a/meta/recipes-core/meta/target-sdk-provides-dummy.bb
+++ b/meta/recipes-core/meta/target-sdk-provides-dummy.bb
@@ -48,7 +48,6 @@ DUMMYPROVIDES_PACKAGES = "\
"
DUMMYPROVIDES = "\
- ${@' '.join([multilib_pkg_extend(d, pkg) for pkg in d.getVar('DUMMYPROVIDES_PACKAGES').split()])} \
/bin/sh \
/bin/bash \
/usr/bin/env \
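
The common thread in these dummy-provides changes is that real package names now live in DUMMYPROVIDES_PACKAGES, where dummy-sdk-package.inc runs them through multilib_pkg_extend(), while file-style provides such as /usr/bin/perl or /bin/bash stay in DUMMYPROVIDES untouched. A rough Python sketch of what the inline ${@...} expression expands to, with multilib_pkg_extend() stubbed out (the real helper lives in the multilib classes and consults the datastore):

    def multilib_pkg_extend(d, pkg):
        # stand-in: return the package plus one copy per multilib prefix
        variants = d.get("MULTILIB_VARIANTS", "").split()
        return " ".join([pkg] + ["%s-%s" % (v, pkg) for v in variants])

    d = {"MULTILIB_VARIANTS": "lib32",
         "DUMMYPROVIDES_PACKAGES": "nativesdk-perl pkgconfig"}

    extra = " ".join(multilib_pkg_extend(d, pkg)
                     for pkg in d["DUMMYPROVIDES_PACKAGES"].split())
    print(extra)
    # -> nativesdk-perl lib32-nativesdk-perl pkgconfig lib32-pkgconfig
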
diff --git a/meta/recipes-core/ncurses/ncurses.inc b/meta/recipes-core/ncurses/ncurses.inc
index 5f2cc35823..b7bf4c0d81 100644
--- a/meta/recipes-core/ncurses/ncurses.inc
+++ b/meta/recipes-core/ncurses/ncurses.inc
@@ -87,6 +87,7 @@ ncurses_configure() {
--disable-rpath-hack \
${EXCONFIG_ARGS} \
--with-manpage-format=normal \
+ --without-manpage-renames \
--disable-stripping \
"$@" || return 1
cd ..
diff --git a/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb b/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
index e638a3737c..c3a89f1c4f 100644
--- a/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.1+20190803.bb
@@ -10,3 +10,5 @@ SRCREV = "3c9b2677c96c645496997321bf2fe465a5e7e21f"
S = "${WORKDIR}/git"
EXTRA_OECONF += "--with-abi-version=5 --cache-file=${B}/config.cache"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+(\.\d+)+(\+\d+)*)"
+
+CVE_VERSION = "6.1.${@d.getVar("PV").split('+')[1]}"
diff --git a/meta/recipes-core/systemd/systemd/0001-Merge-branch-polkit-ref-count.patch b/meta/recipes-core/systemd/systemd/0001-Merge-branch-polkit-ref-count.patch
new file mode 100644
index 0000000000..e684ab8755
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0001-Merge-branch-polkit-ref-count.patch
@@ -0,0 +1,520 @@
+From 0062d795bf29301ae054e1826a7189198a2565c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Tue, 14 Apr 2020 09:06:53 +0000
+Subject: [PATCH] Merge branch 'polkit-ref-count'
+
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/ea0d0ede03c6f18dbc5036c5e9cccf97e415ccc2]
+CVE: CVE-2020-1712
+
+Signed-off-by: Wenlin Kang <wenlin.kang@windriver.com>
+---
+ TODO | 2 +-
+ man/rules/meson.build | 1 +
+ man/sd_bus_enqueue_for_read.xml | 88 ++++++++++++++++
+ src/libsystemd/libsystemd.sym | 1 +
+ src/libsystemd/sd-bus/sd-bus.c | 24 +++++
+ src/shared/bus-util.c | 179 +++++++++++++++++++++-----------
+ src/systemd/sd-bus.h | 1 +
+ 7 files changed, 235 insertions(+), 61 deletions(-)
+ create mode 100644 man/sd_bus_enqueue_for_read.xml
+
+diff --git a/TODO b/TODO
+index c5b5b86057..5c5ea1f568 100644
+--- a/TODO
++++ b/TODO
+@@ -184,7 +184,7 @@ Features:
+
+ * the a-posteriori stopping of units bound to units that disappeared logic
+ should be reworked: there should be a queue of units, and we should only
+- enqeue stop jobs from a defer event that processes queue instead of
++ enqueue stop jobs from a defer event that processes queue instead of
+ right-away when we find a unit that is bound to one that doesn't exist
+ anymore. (similar to how the stop-unneeded queue has been reworked the same
+ way)
+diff --git a/man/rules/meson.build b/man/rules/meson.build
+index 3b63311d7b..e80ed98c34 100644
+--- a/man/rules/meson.build
++++ b/man/rules/meson.build
+@@ -192,6 +192,7 @@ manpages = [
+ 'sd_bus_open_user_with_description',
+ 'sd_bus_open_with_description'],
+ ''],
++ ['sd_bus_enqueue_for_read', '3', [], ''],
+ ['sd_bus_error',
+ '3',
+ ['SD_BUS_ERROR_MAKE_CONST',
+diff --git a/man/sd_bus_enqueue_for_read.xml b/man/sd_bus_enqueue_for_read.xml
+new file mode 100644
+index 0000000000..3318a3031b
+--- /dev/null
++++ b/man/sd_bus_enqueue_for_read.xml
+@@ -0,0 +1,88 @@
++<?xml version='1.0'?>
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
++ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++<!-- SPDX-License-Identifier: LGPL-2.1+ -->
++
++<refentry id="sd_bus_enqueue_for_read"
++ xmlns:xi="http://www.w3.org/2001/XInclude">
++
++ <refentryinfo>
++ <title>sd_bus_enqueue_for_read</title>
++ <productname>systemd</productname>
++ </refentryinfo>
++
++ <refmeta>
++ <refentrytitle>sd_bus_enqueue_for_read</refentrytitle>
++ <manvolnum>3</manvolnum>
++ </refmeta>
++
++ <refnamediv>
++ <refname>sd_bus_enqueue_for_read</refname>
++
++ <refpurpose>Re-enqueue a bus message on a bus connection, for reading.</refpurpose>
++ </refnamediv>
++
++ <refsynopsisdiv>
++ <funcsynopsis>
++ <funcsynopsisinfo>#include &lt;systemd/sd-bus.h&gt;</funcsynopsisinfo>
++
++ <funcprototype>
++ <funcdef>int <function>sd_bus_enqueue_for_read</function></funcdef>
++ <paramdef>sd_bus *<parameter>bus</parameter></paramdef>
++ <paramdef>sd_bus_message *<parameter>message</parameter></paramdef>
++ </funcprototype>
++
++ </funcsynopsis>
++ </refsynopsisdiv>
++
++ <refsect1>
++ <title>Description</title>
++
++ <para><function>sd_bus_enqueue_for_read()</function> may be used to re-enqueue an incoming bus message on
++ the local read queue, so that it is processed and dispatched locally again, similar to how an incoming
++ message from the peer is processed. Takes a bus connection object and the message to enqueue. A reference
++ is taken of the message and the caller's reference thus remains in possession of the caller. The message
++ is enqueued at the end of the queue, thus will be dispatched after all other already queued messages are
++ dispatched.</para>
++
++ <para>This call is primarily useful for dealing with incoming method calls that may be processed only
++ after an additional asynchronous operation completes. One example is PolicyKit authorization requests
++ that are determined to be necessary to authorize a newly incoming method call: when the PolicyKit response
++ is received the original method call may be re-enqueued to process it again, this time with the
++ authorization result known.</para>
++ </refsect1>
++
++ <refsect1>
++ <title>Return Value</title>
++
++ <para>On success, this function returns 0 or a positive integer. On failure, it returns a negative errno-style
++ error code.</para>
++
++ <refsect2>
++ <title>Errors</title>
++
++ <para>Returned errors may indicate the following problems:</para>
++
++ <variablelist>
++ <varlistentry>
++ <term><constant>-ECHILD</constant></term>
++
++ <listitem><para>The bus connection has been created in a different process.</para></listitem>
++ </varlistentry>
++ </variablelist>
++ </refsect2>
++ </refsect1>
++
++ <xi:include href="libsystemd-pkgconfig.xml" />
++
++ <refsect1>
++ <title>See Also</title>
++
++ <para>
++ <citerefentry><refentrytitle>systemd</refentrytitle><manvolnum>1</manvolnum></citerefentry>,
++ <citerefentry><refentrytitle>sd-bus</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
++ <citerefentry><refentrytitle>sd_bus_send</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
++ </para>
++ </refsect1>
++
++</refentry>
+diff --git a/src/libsystemd/libsystemd.sym b/src/libsystemd/libsystemd.sym
+index 5ec42e0f1f..c40f1b7d1a 100644
+--- a/src/libsystemd/libsystemd.sym
++++ b/src/libsystemd/libsystemd.sym
+@@ -679,6 +679,7 @@ global:
+
+ LIBSYSTEMD_243 {
+ global:
++ sd_bus_enqueue_for_read;
+ sd_bus_object_vtable_format;
+ sd_event_source_disable_unref;
+ } LIBSYSTEMD_241;
+diff --git a/src/libsystemd/sd-bus/sd-bus.c b/src/libsystemd/sd-bus/sd-bus.c
+index 026ac8cb94..07bc145f37 100644
+--- a/src/libsystemd/sd-bus/sd-bus.c
++++ b/src/libsystemd/sd-bus/sd-bus.c
+@@ -4194,3 +4194,27 @@ _public_ int sd_bus_get_close_on_exit(sd_bus *bus) {
+
+ return bus->close_on_exit;
+ }
++
++_public_ int sd_bus_enqueue_for_read(sd_bus *bus, sd_bus_message *m) {
++ int r;
++
++ assert_return(bus, -EINVAL);
++ assert_return(bus = bus_resolve(bus), -ENOPKG);
++ assert_return(m, -EINVAL);
++ assert_return(m->sealed, -EINVAL);
++ assert_return(!bus_pid_changed(bus), -ECHILD);
++
++ if (!BUS_IS_OPEN(bus->state))
++ return -ENOTCONN;
++
++ /* Re-enqueue a message for reading. This is primarily useful for PolicyKit-style authentication,
++ * where we accept a message, then determine we need to interactively authenticate the user, and then
++ * we want to process the message again. */
++
++ r = bus_rqueue_make_room(bus);
++ if (r < 0)
++ return r;
++
++ bus->rqueue[bus->rqueue_size++] = bus_message_ref_queued(m, bus);
++ return 0;
++}
+diff --git a/src/shared/bus-util.c b/src/shared/bus-util.c
+index e9b0b8a99d..88cad9cd0a 100644
+--- a/src/shared/bus-util.c
++++ b/src/shared/bus-util.c
+@@ -212,6 +212,34 @@ static int check_good_user(sd_bus_message *m, uid_t good_user) {
+ return sender_uid == good_user;
+ }
+
++#if ENABLE_POLKIT
++static int bus_message_append_strv_key_value(
++ sd_bus_message *m,
++ const char **l) {
++
++ const char **k, **v;
++ int r;
++
++ assert(m);
++
++ r = sd_bus_message_open_container(m, 'a', "{ss}");
++ if (r < 0)
++ return r;
++
++ STRV_FOREACH_PAIR(k, v, l) {
++ r = sd_bus_message_append(m, "{ss}", *k, *v);
++ if (r < 0)
++ return r;
++ }
++
++ r = sd_bus_message_close_container(m);
++ if (r < 0)
++ return r;
++
++ return r;
++}
++#endif
++
+ int bus_test_polkit(
+ sd_bus_message *call,
+ int capability,
+@@ -219,7 +247,7 @@ int bus_test_polkit(
+ const char **details,
+ uid_t good_user,
+ bool *_challenge,
+- sd_bus_error *e) {
++ sd_bus_error *ret_error) {
+
+ int r;
+
+@@ -242,7 +270,7 @@ int bus_test_polkit(
+ _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL;
+ _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL;
+ int authorized = false, challenge = false;
+- const char *sender, **k, **v;
++ const char *sender;
+
+ sender = sd_bus_message_get_sender(call);
+ if (!sender)
+@@ -266,17 +294,7 @@ int bus_test_polkit(
+ if (r < 0)
+ return r;
+
+- r = sd_bus_message_open_container(request, 'a', "{ss}");
+- if (r < 0)
+- return r;
+-
+- STRV_FOREACH_PAIR(k, v, details) {
+- r = sd_bus_message_append(request, "{ss}", *k, *v);
+- if (r < 0)
+- return r;
+- }
+-
+- r = sd_bus_message_close_container(request);
++ r = bus_message_append_strv_key_value(request, details);
+ if (r < 0)
+ return r;
+
+@@ -284,11 +302,11 @@ int bus_test_polkit(
+ if (r < 0)
+ return r;
+
+- r = sd_bus_call(call->bus, request, 0, e, &reply);
++ r = sd_bus_call(call->bus, request, 0, ret_error, &reply);
+ if (r < 0) {
+ /* Treat no PK available as access denied */
+- if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN)) {
+- sd_bus_error_free(e);
++ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) {
++ sd_bus_error_free(ret_error);
+ return -EACCES;
+ }
+
+@@ -319,15 +337,17 @@ int bus_test_polkit(
+ #if ENABLE_POLKIT
+
+ typedef struct AsyncPolkitQuery {
++ char *action;
++ char **details;
++
+ sd_bus_message *request, *reply;
+- sd_bus_message_handler_t callback;
+- void *userdata;
+ sd_bus_slot *slot;
++
+ Hashmap *registry;
++ sd_event_source *defer_event_source;
+ } AsyncPolkitQuery;
+
+ static void async_polkit_query_free(AsyncPolkitQuery *q) {
+-
+ if (!q)
+ return;
+
+@@ -339,9 +359,25 @@ static void async_polkit_query_free(AsyncPolkitQuery *q) {
+ sd_bus_message_unref(q->request);
+ sd_bus_message_unref(q->reply);
+
++ free(q->action);
++ strv_free(q->details);
++
++ sd_event_source_disable_unref(q->defer_event_source);
+ free(q);
+ }
+
++static int async_polkit_defer(sd_event_source *s, void *userdata) {
++ AsyncPolkitQuery *q = userdata;
++
++ assert(s);
++
++ /* This is called as idle event source after we processed the async polkit reply, hopefully after the
++ * method call we re-enqueued has been properly processed. */
++
++ async_polkit_query_free(q);
++ return 0;
++}
++
+ static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) {
+ _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL;
+ AsyncPolkitQuery *q = userdata;
+@@ -350,21 +386,46 @@ static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_e
+ assert(reply);
+ assert(q);
+
++ assert(q->slot);
+ q->slot = sd_bus_slot_unref(q->slot);
++
++ assert(!q->reply);
+ q->reply = sd_bus_message_ref(reply);
+
++ /* Now, let's dispatch the original message a second time by re-enqueuing it. This will then traverse the
++ * whole message processing again, and thus re-validating and re-retrieving the "userdata" field
++ * again.
++ *
++ * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,
++ * i.e. after the second time the message is processed is complete. */
++
++ assert(!q->defer_event_source);
++ r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);
++ if (r < 0)
++ goto fail;
++
++ r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);
++ if (r < 0)
++ goto fail;
++
++ r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);
++ if (r < 0)
++ goto fail;
++
+ r = sd_bus_message_rewind(q->request, true);
+- if (r < 0) {
+- r = sd_bus_reply_method_errno(q->request, r, NULL);
+- goto finish;
+- }
++ if (r < 0)
++ goto fail;
+
+- r = q->callback(q->request, q->userdata, &error_buffer);
+- r = bus_maybe_reply_error(q->request, r, &error_buffer);
++ r = sd_bus_enqueue_for_read(sd_bus_message_get_bus(q->request), q->request);
++ if (r < 0)
++ goto fail;
+
+-finish:
+- async_polkit_query_free(q);
++ return 1;
+
++fail:
++ log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");
++ (void) sd_bus_reply_method_errno(q->request, r, NULL);
++ async_polkit_query_free(q);
+ return r;
+ }
+
+@@ -378,16 +439,14 @@ int bus_verify_polkit_async(
+ bool interactive,
+ uid_t good_user,
+ Hashmap **registry,
+- sd_bus_error *error) {
++ sd_bus_error *ret_error) {
+
+ #if ENABLE_POLKIT
+ _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL;
+ AsyncPolkitQuery *q;
+- const char *sender, **k, **v;
+- sd_bus_message_handler_t callback;
+- void *userdata;
+ int c;
+ #endif
++ const char *sender;
+ int r;
+
+ assert(call);
+@@ -403,11 +462,17 @@ int bus_verify_polkit_async(
+ if (q) {
+ int authorized, challenge;
+
+- /* This is the second invocation of this function, and
+- * there's already a response from polkit, let's
+- * process it */
++ /* This is the second invocation of this function, and there's already a response from
++ * polkit, let's process it */
+ assert(q->reply);
+
++ /* If the operation we want to authenticate changed between the first and the second time,
++ * let's not use this authentication, it might be out of date as the object and context we
++ * operate on might have changed. */
++ if (!streq(q->action, action) ||
++ !strv_equal(q->details, (char**) details))
++ return -ESTALE;
++
+ if (sd_bus_message_is_method_error(q->reply, NULL)) {
+ const sd_bus_error *e;
+
+@@ -418,7 +483,7 @@ int bus_verify_polkit_async(
+ return -EACCES;
+
+ /* Copy error from polkit reply */
+- sd_bus_error_copy(error, e);
++ sd_bus_error_copy(ret_error, e);
+ return -sd_bus_error_get_errno(e);
+ }
+
+@@ -433,7 +498,7 @@ int bus_verify_polkit_async(
+ return 1;
+
+ if (challenge)
+- return sd_bus_error_set(error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required.");
++ return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required.");
+
+ return -EACCES;
+ }
+@@ -445,20 +510,12 @@ int bus_verify_polkit_async(
+ else if (r > 0)
+ return 1;
+
+-#if ENABLE_POLKIT
+- if (sd_bus_get_current_message(call->bus) != call)
+- return -EINVAL;
+-
+- callback = sd_bus_get_current_handler(call->bus);
+- if (!callback)
+- return -EINVAL;
+-
+- userdata = sd_bus_get_current_userdata(call->bus);
+
+ sender = sd_bus_message_get_sender(call);
+ if (!sender)
+ return -EBADMSG;
+
++#if ENABLE_POLKIT
+ c = sd_bus_message_get_allow_interactive_authorization(call);
+ if (c < 0)
+ return c;
+@@ -487,17 +544,7 @@ int bus_verify_polkit_async(
+ if (r < 0)
+ return r;
+
+- r = sd_bus_message_open_container(pk, 'a', "{ss}");
+- if (r < 0)
+- return r;
+-
+- STRV_FOREACH_PAIR(k, v, details) {
+- r = sd_bus_message_append(pk, "{ss}", *k, *v);
+- if (r < 0)
+- return r;
+- }
+-
+- r = sd_bus_message_close_container(pk);
++ r = bus_message_append_strv_key_value(pk, details);
+ if (r < 0)
+ return r;
+
+@@ -505,13 +552,25 @@ int bus_verify_polkit_async(
+ if (r < 0)
+ return r;
+
+- q = new0(AsyncPolkitQuery, 1);
++ q = new(AsyncPolkitQuery, 1);
+ if (!q)
+ return -ENOMEM;
+
+- q->request = sd_bus_message_ref(call);
+- q->callback = callback;
+- q->userdata = userdata;
++ *q = (AsyncPolkitQuery) {
++ .request = sd_bus_message_ref(call),
++ };
++
++ q->action = strdup(action);
++ if (!q->action) {
++ async_polkit_query_free(q);
++ return -ENOMEM;
++ }
++
++ q->details = strv_copy((char**) details);
++ if (!q->details) {
++ async_polkit_query_free(q);
++ return -ENOMEM;
++ }
+
+ r = hashmap_put(*registry, call, q);
+ if (r < 0) {
+diff --git a/src/systemd/sd-bus.h b/src/systemd/sd-bus.h
+index 84ceb62dc7..0e5c761f83 100644
+--- a/src/systemd/sd-bus.h
++++ b/src/systemd/sd-bus.h
+@@ -201,6 +201,7 @@ int sd_bus_process(sd_bus *bus, sd_bus_message **r);
+ int sd_bus_process_priority(sd_bus *bus, int64_t max_priority, sd_bus_message **r);
+ int sd_bus_wait(sd_bus *bus, uint64_t timeout_usec);
+ int sd_bus_flush(sd_bus *bus);
++int sd_bus_enqueue_for_read(sd_bus *bus, sd_bus_message *m);
+
+ sd_bus_slot* sd_bus_get_current_slot(sd_bus *bus);
+ sd_bus_message* sd_bus_get_current_message(sd_bus *bus);
+--
+2.23.0
+
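
The core of the polkit-ref-count backport is that bus_verify_polkit_async() no longer stashes the current handler and userdata; when the PolicyKit reply arrives, the original method call is re-enqueued on the bus read queue via the new sd_bus_enqueue_for_read(), normal dispatch then runs the handler a second time, which now finds the cached verdict in the registry, and an idle defer event frees the query afterwards. A rough Python model of that two-pass dispatch, with the asynchronous PolicyKit round-trip collapsed into a synchronous stub:

    registry = {}     # call -> polkit verdict, like the Hashmap in bus-util.c
    read_queue = []   # stands in for the bus rqueue

    def polkit_check(call):
        return call.endswith("reboot")        # stub for CheckAuthorization

    def handle_call(call):
        if call not in registry:
            # First pass: no verdict yet.  Record one and re-enqueue the
            # original message so dispatch runs this handler again later
            # (sd_bus_enqueue_for_read() in the real code).
            registry[call] = polkit_check(call)
            read_queue.append(call)
            return
        verdict = registry.pop(call)          # freed by the defer event source
        print(call, "->", "authorized" if verdict else "denied")

    read_queue += ["org.example.reboot", "org.example.format-disk"]
    while read_queue:
        handle_call(read_queue.pop(0))
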
diff --git a/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch b/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch
new file mode 100644
index 0000000000..7b5e3e7f7a
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch
@@ -0,0 +1,96 @@
+From 156a5fd297b61bce31630d7a52c15614bf784843 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Sun, 31 May 2020 18:21:09 +0200
+Subject: [PATCH 1/1] basic/user-util: always use base 10 for user/group
+ numbers
+
+We would parse numbers with base prefixes as user identifiers. For example,
+"0x2b3bfa0" would be interpreted as UID==45334432 and "01750" would be
+interpreted as UID==1000. This parsing was used also in cases where either a
+user/group name or number may be specified. This means that names like
+0x2b3bfa0 would be ambiguous: they are a valid user name according to our
+documented relaxed rules, but they would also be parsed as numeric uids.
+
+This behaviour is definitely not expected by users, since tools generally only
+accept decimal numbers (e.g. id, getent passwd), while other tools only accept
+user names and thus will interpret such strings as user names without even
+attempting to convert them to numbers (su, ssh). So let's follow suit and only
+accept numbers in decimal notation. Effectively this means that we will reject
+such strings as a username/uid/groupname/gid where strict mode is used, and try
+to look up a user/group with such a name in relaxed mode.
+
+Since the function changed is fairly low-level and fairly widely used, this
+affects multiple tools: loginctl show-user/enable-linger/disable-linger foo',
+the third argument in sysusers.d, fourth and fifth arguments in tmpfiles.d,
+etc.
+
+Fixes #15985.
+---
+ src/basic/user-util.c | 2 +-
+ src/test/test-user-util.c | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+--- end of commit 156a5fd297b61bce31630d7a52c15614bf784843 ---
+
+
+Add definition of safe_atou32_full() from commit b934ac3d6e7dcad114776ef30ee9098693e7ab7e
+
+CVE: CVE-2020-13776
+
+Upstream-Status: Backport [https://github.com/systemd/systemd.git]
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+
+
+--- git.orig/src/basic/user-util.c
++++ git/src/basic/user-util.c
+@@ -49,7 +49,7 @@ int parse_uid(const char *s, uid_t *ret)
+ assert(s);
+
+ assert_cc(sizeof(uid_t) == sizeof(uint32_t));
+- r = safe_atou32(s, &uid);
++ r = safe_atou32_full(s, 10, &uid);
+ if (r < 0)
+ return r;
+
+--- git.orig/src/test/test-user-util.c
++++ git/src/test/test-user-util.c
+@@ -48,9 +48,19 @@ static void test_parse_uid(void) {
+
+ r = parse_uid("65535", &uid);
+ assert_se(r == -ENXIO);
++ assert_se(uid == 100);
++
++ r = parse_uid("0x1234", &uid);
++ assert_se(r == -EINVAL);
++ assert_se(uid == 100);
++
++ r = parse_uid("01234", &uid);
++ assert_se(r == 0);
++ assert_se(uid == 1234);
+
+ r = parse_uid("asdsdas", &uid);
+ assert_se(r == -EINVAL);
++ assert_se(uid == 1234);
+ }
+
+ static void test_uid_ptr(void) {
+--- git.orig/src/basic/parse-util.h
++++ git/src/basic/parse-util.h
+@@ -45,9 +45,13 @@ static inline int safe_atoux16(const cha
+
+ int safe_atoi16(const char *s, int16_t *ret);
+
+-static inline int safe_atou32(const char *s, uint32_t *ret_u) {
++static inline int safe_atou32_full(const char *s, unsigned base, uint32_t *ret_u) {
+ assert_cc(sizeof(uint32_t) == sizeof(unsigned));
+- return safe_atou(s, (unsigned*) ret_u);
++ return safe_atou_full(s, base, (unsigned*) ret_u);
++}
++
++static inline int safe_atou32(const char *s, uint32_t *ret_u) {
++ return safe_atou32_full(s, 0, (unsigned*) ret_u);
+ }
+
+ static inline int safe_atoi32(const char *s, int32_t *ret_i) {
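
The essence of the backport above is forcing user/group ID parsing to base 10, so strings with "0x" or leading-zero prefixes can no longer be silently read as hex or octal. A standalone sketch of that behaviour for illustration (parse_uid_decimal and the program around it are hypothetical, not systemd code; the real parse_uid additionally rejects reserved values such as 65535, which this omits):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a UID strictly as a decimal number: "0x1234" is rejected and
     * "01234" is read as decimal 1234, matching the new tests above. */
    static int parse_uid_decimal(const char *s, uint32_t *ret)
    {
            char *end;
            unsigned long v;

            errno = 0;
            v = strtoul(s, &end, 10);              /* base 10 only, no prefixes */
            if (end == s || *end != '\0')
                    return -EINVAL;
            if (errno == ERANGE || v > UINT32_MAX)
                    return -ERANGE;
            *ret = (uint32_t)v;
            return 0;
    }

    int main(void)
    {
            uint32_t uid;
            printf("%d\n", parse_uid_decimal("0x1234", &uid)); /* -EINVAL */
            printf("%d\n", parse_uid_decimal("01234", &uid));  /* 0, uid == 1234 */
            return 0;
    }
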
diff --git a/meta/recipes-core/systemd/systemd_243.2.bb b/meta/recipes-core/systemd/systemd_243.2.bb
index 6e7f95693b..905348176c 100644
--- a/meta/recipes-core/systemd/systemd_243.2.bb
+++ b/meta/recipes-core/systemd/systemd_243.2.bb
@@ -24,6 +24,8 @@ SRC_URI += "file://touchscreen.rules \
file://0005-rules-watch-metadata-changes-in-ide-devices.patch \
file://0001-unit-file.c-consider-symlink-on-filesystems-like-NFS.patch \
file://99-default.preset \
+ file://0001-Merge-branch-polkit-ref-count.patch \
+ file://CVE-2020-13776.patch \
"
# patches needed by musl
diff --git a/meta/recipes-core/sysvinit/sysvinit_2.88dsf.bb b/meta/recipes-core/sysvinit/sysvinit_2.88dsf.bb
index bfc1283f73..39f612be1f 100644
--- a/meta/recipes-core/sysvinit/sysvinit_2.88dsf.bb
+++ b/meta/recipes-core/sysvinit/sysvinit_2.88dsf.bb
@@ -31,6 +31,7 @@ B = "${S}/src"
inherit update-alternatives distro_features_check
DEPENDS_append = " update-rc.d-native base-passwd virtual/crypt"
+do_package_setscene[depends] = "${MLPREFIX}base-passwd:do_populate_sysroot"
REQUIRED_DISTRO_FEATURES = "sysvinit"
diff --git a/meta/recipes-devtools/apt/files/apt.conf b/meta/recipes-devtools/apt/files/apt.conf
index 03351356bc..c95a5b07af 100644
--- a/meta/recipes-devtools/apt/files/apt.conf
+++ b/meta/recipes-devtools/apt/files/apt.conf
@@ -39,4 +39,4 @@ APT
};
};
-DPkg::Options {"--root=#ROOTFS#";"--admindir=#ROOTFS#/var/lib/dpkg";"--force-all";"--no-debsig"};
+DPkg::Options {"--root=#ROOTFS#";"--admindir=#ROOTFS#/var/lib/dpkg";"--force-all";"--no-force-overwrite";"--no-debsig"};
diff --git a/meta/recipes-devtools/binutils/binutils-2.32.inc b/meta/recipes-devtools/binutils/binutils-2.32.inc
index 349c3e1154..1f2d033a6c 100644
--- a/meta/recipes-devtools/binutils/binutils-2.32.inc
+++ b/meta/recipes-devtools/binutils/binutils-2.32.inc
@@ -51,6 +51,7 @@ SRC_URI = "\
file://CVE-2019-14444.patch \
file://CVE-2019-17450.patch \
file://CVE-2019-17451.patch \
+ file://0001-Fix-a-missing-include-of-string.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/binutils/binutils/0001-Fix-a-missing-include-of-string.patch b/meta/recipes-devtools/binutils/binutils/0001-Fix-a-missing-include-of-string.patch
new file mode 100644
index 0000000000..9f52ed8938
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/0001-Fix-a-missing-include-of-string.patch
@@ -0,0 +1,49 @@
+From 1aaf9d481a7c0e20675df165a4968e255521bea8 Mon Sep 17 00:00:00 2001
+From: Trevor Gamblin <trevor.gamblin@windriver.com>
+Date: Wed, 28 Apr 2021 09:25:08 -0400
+Subject: [PATCH] Fix a missing include of <string>
+
+gold/ChangeLog:
+
+2019-06-07 Martin Liska <mliska@suse.cz>
+
+ * errors.h: Include string.
+
+Upstream-Status: Backport
+(https://github.com/bminor/binutils-gdb/commit/a3972330f)
+
+Signed-off-by: Trevor Gamblin <trevor.gamblin@windriver.com>
+---
+ gold/ChangeLog | 3 +++
+ gold/errors.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/gold/ChangeLog b/gold/ChangeLog
+index 458bed793e0..00f804b1bf6 100644
+--- a/gold/ChangeLog
++++ b/gold/ChangeLog
+@@ -2,6 +2,9 @@
+
+ 2.32 Release.
+
++2019-06-10 Martin Liska <mliska@suse.cz>
++
++ * errors.h: Include string.
+ 2019-01-21 Nick Clifton <nickc@redhat.com>
+
+ * po/uk.po: Updated Ukranian translation.
+diff --git a/gold/errors.h b/gold/errors.h
+index c26b5586379..ac681e965bb 100644
+--- a/gold/errors.h
++++ b/gold/errors.h
+@@ -24,6 +24,7 @@
+ #define GOLD_ERRORS_H
+
+ #include <cstdarg>
++#include <string>
+
+ #include "gold-threads.h"
+
+--
+2.30.2
+
diff --git a/meta/recipes-devtools/binutils/binutils/nativesdk-relocation.patch b/meta/recipes-devtools/binutils/binutils/nativesdk-relocation.patch
new file mode 100644
index 0000000000..408f7d18b7
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/nativesdk-relocation.patch
@@ -0,0 +1,80 @@
+We need binutils to look at our ld.so.conf file within the SDK to ensure
+we search the SDK's libdirs as well as those from the host system.
+
+We therefore pass in the directory to the code using a define, then add
+it to a section we relocate in the same way as the gcc internal
+paths. This ensures that ld works correctly in our buildtools
+tarball.
+
+Standard sysroot relocation doesn't work since we're not in a sysroot;
+we want to use both the host system and SDK libs.
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+2020/1/17
+Upstream-Status: Inappropriate [OE specific tweak]
+
+Index: git/ld/Makefile.am
+===================================================================
+--- git.orig/ld/Makefile.am
++++ git/ld/Makefile.am
+@@ -36,7 +36,8 @@ am__skipyacc =
+
+ ELF_CLFAGS=-DELF_LIST_OPTIONS=@elf_list_options@ \
+ -DELF_SHLIB_LIST_OPTIONS=@elf_shlib_list_options@ \
+- -DELF_PLT_UNWIND_LIST_OPTIONS=@elf_plt_unwind_list_options@
++ -DELF_PLT_UNWIND_LIST_OPTIONS=@elf_plt_unwind_list_options@ \
++ -DSYSCONFDIR="\"$(sysconfdir)\""
+ WARN_CFLAGS = @WARN_CFLAGS@
+ NO_WERROR = @NO_WERROR@
+ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS)
+Index: git/ld/Makefile.in
+===================================================================
+--- git.orig/ld/Makefile.in
++++ git/ld/Makefile.in
+@@ -546,7 +546,8 @@ am__skiplex =
+ am__skipyacc =
+ ELF_CLFAGS = -DELF_LIST_OPTIONS=@elf_list_options@ \
+ -DELF_SHLIB_LIST_OPTIONS=@elf_shlib_list_options@ \
+- -DELF_PLT_UNWIND_LIST_OPTIONS=@elf_plt_unwind_list_options@
++ -DELF_PLT_UNWIND_LIST_OPTIONS=@elf_plt_unwind_list_options@ \
++ -DSYSCONFDIR="\"$(sysconfdir)\""
+
+ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS)
+ @ENABLE_PLUGINS_FALSE@PLUGIN_C =
+Index: git/ld/emultempl/elf32.em
+===================================================================
+--- git.orig/ld/emultempl/elf32.em
++++ git/ld/emultempl/elf32.em
+@@ -1024,7 +1024,7 @@ gld${EMULATION_NAME}_check_ld_so_conf (c
+
+ info.path = NULL;
+ info.len = info.alloc = 0;
+- tmppath = concat (ld_sysroot, "${prefix}/etc/ld.so.conf",
++ tmppath = concat (ld_sysconfdir, "/ld.so.conf",
+ (const char *) NULL);
+ if (!gld${EMULATION_NAME}_parse_ld_so_conf (&info, tmppath))
+ {
+Index: git/ld/ldmain.c
+===================================================================
+--- git.orig/ld/ldmain.c
++++ git/ld/ldmain.c
+@@ -68,6 +68,7 @@ char *program_name;
+
+ /* The prefix for system library directories. */
+ const char *ld_sysroot;
++char ld_sysconfdir[4096] __attribute__ ((section (".gccrelocprefix"))) = SYSCONFDIR;
+
+ /* The canonical representation of ld_sysroot. */
+ char *ld_canon_sysroot;
+Index: git/ld/ldmain.h
+===================================================================
+--- git.orig/ld/ldmain.h
++++ git/ld/ldmain.h
+@@ -23,6 +23,7 @@
+
+ extern char *program_name;
+ extern const char *ld_sysroot;
++extern char ld_sysconfdir[4096];
+ extern char *ld_canon_sysroot;
+ extern int ld_canon_sysroot_len;
+ extern FILE *saved_script_handle;
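
The relocation mechanism described above compiles the default path into an oversized, writable array placed in a dedicated ELF section (.gccrelocprefix in the patch), so the SDK/buildtools installer can later patch the embedded string inside the installed binary instead of relying on sysroot prefixing. A minimal standalone sketch of the idea (the SYSCONFDIR default and the program around it are illustrative, not the ld code):

    #include <stdio.h>

    #ifndef SYSCONFDIR
    #define SYSCONFDIR "/usr/local/etc"          /* assumed build-time default */
    #endif

    /* Oversized, mutable buffer in a named section: a post-install relocation
     * step can locate this section in the binary and overwrite the path in
     * place, provided the new path fits within the 4096 bytes. */
    char config_dir[4096] __attribute__((section(".gccrelocprefix"))) = SYSCONFDIR;

    int main(void)
    {
            printf("would read %s/ld.so.conf\n", config_dir);
            return 0;
    }

Reusing the same section name as gcc's internal paths is presumably what lets the existing SDK relocation machinery handle ld's sysconfdir without any new tooling.
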
diff --git a/meta/recipes-devtools/binutils/binutils_2.32.bb b/meta/recipes-devtools/binutils/binutils_2.32.bb
index 89315915c4..ecdab96658 100644
--- a/meta/recipes-devtools/binutils/binutils_2.32.bb
+++ b/meta/recipes-devtools/binutils/binutils_2.32.bb
@@ -51,5 +51,10 @@ do_install_class-native () {
PACKAGE_BEFORE_PN += "libbfd"
FILES_libbfd = "${libdir}/libbfd-*.so"
+SRC_URI_append_class-nativesdk = "file://nativesdk-relocation.patch"
+
+USE_ALTERNATIVES_FOR_class-nativesdk = ""
+FILES_${PN}_append_class-nativesdk = " ${bindir}"
+
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
new file mode 100644
index 0000000000..ba4e3a3c97
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch
@@ -0,0 +1,49 @@
+From 71ba13755337e19c9a826dfc874562a36e1b24d3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 19 Dec 2019 19:45:06 -0500
+Subject: [PATCH] e2fsck: don't try to rehash a deleted directory
+
+If directory has been deleted in pass1[bcd] processing, then we
+shouldn't try to rehash the directory in pass 3a when we try to
+rehash/reoptimize directories.
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=71ba13755337e19c9a826dfc874562a36e1b24d3]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ e2fsck/pass1b.c | 4 ++++
+ e2fsck/rehash.c | 2 ++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/e2fsck/pass1b.c b/e2fsck/pass1b.c
+index 5693b9cf..bca701ca 100644
+--- a/e2fsck/pass1b.c
++++ b/e2fsck/pass1b.c
+@@ -705,6 +705,10 @@ static void delete_file(e2fsck_t ctx, ext2_ino_t ino,
+ fix_problem(ctx, PR_1B_BLOCK_ITERATE, &pctx);
+ if (ctx->inode_bad_map)
+ ext2fs_unmark_inode_bitmap2(ctx->inode_bad_map, ino);
++ if (ctx->inode_reg_map)
++ ext2fs_unmark_inode_bitmap2(ctx->inode_reg_map, ino);
++ ext2fs_unmark_inode_bitmap2(ctx->inode_dir_map, ino);
++ ext2fs_unmark_inode_bitmap2(ctx->inode_used_map, ino);
+ ext2fs_inode_alloc_stats2(fs, ino, -1, LINUX_S_ISDIR(dp->inode.i_mode));
+ quota_data_sub(ctx->qctx, &dp->inode, ino,
+ pb.dup_blocks * fs->blocksize);
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index 3dd1e941..2c908be0 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -1028,6 +1028,8 @@ void e2fsck_rehash_directories(e2fsck_t ctx)
+ if (!ext2fs_u32_list_iterate(iter, &ino))
+ break;
+ }
++ if (!ext2fs_test_inode_bitmap2(ctx->inode_dir_map, ino))
++ continue;
+
+ pctx.dir = ino;
+ if (first) {
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
new file mode 100644
index 0000000000..de4bce0037
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2019-5188.patch
@@ -0,0 +1,57 @@
+From 8dd73c149f418238f19791f9d666089ef9734dff Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 19 Dec 2019 19:37:34 -0500
+Subject: [PATCH] e2fsck: abort if there is a corrupted directory block when
+ rehashing
+
+In e2fsck pass 3a, when we are rehashing directories, at least in
+theory, all of the directories should have had corruptions with
+respect to directory entry structure fixed. However, it's possible
+(for example, if the user declined a fix) that we can reach this stage
+of processing with corrupted directory entries.
+
+So check for that case and don't try to process a corrupted directory
+block so we don't run into trouble in mutate_name() if there is a
+zero-length file name.
+
+Addresses: TALOS-2019-0973
+Addresses: CVE-2019-5188
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+CVE: CVE-2019-5188
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=8dd73c149f418238f19791f9d666089ef9734dff]
+---
+ e2fsck/rehash.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index a5fc1be1..3dd1e941 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -160,6 +160,10 @@ static int fill_dir_block(ext2_filsys fs,
+ dir_offset += rec_len;
+ if (dirent->inode == 0)
+ continue;
++ if ((name_len) == 0) {
++ fd->err = EXT2_ET_DIR_CORRUPTED;
++ return BLOCK_ABORT;
++ }
+ if (!fd->compress && (name_len == 1) &&
+ (dirent->name[0] == '.'))
+ continue;
+@@ -401,6 +405,11 @@ static int duplicate_search_and_fix(e2fsck_t ctx, ext2_filsys fs,
+ continue;
+ }
+ new_len = ext2fs_dirent_name_len(ent->dir);
++ if (new_len == 0) {
++ /* should never happen */
++ ext2fs_unmark_valid(fs);
++ continue;
++ }
+ memcpy(new_name, ent->dir->name, new_len);
+ mutate_name(new_name, &new_len);
+ for (j=0; j < fd->num_array; j++) {
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
new file mode 100644
index 0000000000..342a2b855b
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/e2fsck-fix-use-after-free-in-calculate_tree.patch
@@ -0,0 +1,76 @@
+From: Wang Shilong <wshilong@ddn.com>
+Date: Mon, 30 Dec 2019 19:52:39 -0500
+Subject: e2fsck: fix use after free in calculate_tree()
+
+The problem is alloc_blocks() will call get_next_block() which might
+reallocate outdir->buf, and memory address could be changed after
+this. To fix this, pointers that point into outdir->buf, such as
+int_limit and root, need to be recalculated based on the new starting
+address of outdir->buf.
+
+[ Changed to correctly recalculate int_limit, and to optimize how we
+ reallocate outdir->buf. -TYT ]
+
+Addresses-Debian-Bug: 948517
+Signed-off-by: Wang Shilong <wshilong@ddn.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+(cherry picked from commit 101e73e99ccafa0403fcb27dd7413033b587ca01)
+
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?id=101e73e99ccafa0403fcb27dd7413033b587ca01]
+---
+ e2fsck/rehash.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/e2fsck/rehash.c b/e2fsck/rehash.c
+index 0a5888a9..2574e151 100644
+--- a/e2fsck/rehash.c
++++ b/e2fsck/rehash.c
+@@ -295,7 +295,11 @@ static errcode_t get_next_block(ext2_filsys fs, struct out_dir *outdir,
+ errcode_t retval;
+
+ if (outdir->num >= outdir->max) {
+- retval = alloc_size_dir(fs, outdir, outdir->max + 50);
++ int increment = outdir->max / 10;
++
++ if (increment < 50)
++ increment = 50;
++ retval = alloc_size_dir(fs, outdir, outdir->max + increment);
+ if (retval)
+ return retval;
+ }
+@@ -637,6 +641,9 @@ static int alloc_blocks(ext2_filsys fs,
+ if (retval)
+ return retval;
+
++ /* outdir->buf might be reallocated */
++ *prev_ent = (struct ext2_dx_entry *) (outdir->buf + *prev_offset);
++
+ *next_ent = set_int_node(fs, block_start);
+ *limit = (struct ext2_dx_countlimit *)(*next_ent);
+ if (next_offset)
+@@ -726,6 +733,9 @@ static errcode_t calculate_tree(ext2_filsys fs,
+ return retval;
+ }
+ if (c3 == 0) {
++ int delta1 = (char *)int_limit - outdir->buf;
++ int delta2 = (char *)root - outdir->buf;
++
+ retval = alloc_blocks(fs, &limit, &int_ent,
+ &dx_ent, &int_offset,
+ NULL, outdir, i, &c2,
+@@ -733,6 +743,11 @@ static errcode_t calculate_tree(ext2_filsys fs,
+ if (retval)
+ return retval;
+
++ /* outdir->buf might be reallocated */
++ int_limit = (struct ext2_dx_countlimit *)
++ (outdir->buf + delta1);
++ root = (struct ext2_dx_entry *)
++ (outdir->buf + delta2);
+ }
+ dx_ent->block = ext2fs_cpu_to_le32(i);
+ if (c3 != limit->limit)
+--
+2.24.1
+
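
The bug class behind this fix is generic C: any pointer computed into a heap buffer is invalidated once that buffer is grown with realloc(). The pattern used in the patch, save an offset, reallocate, then rebuild the pointer from the new base, looks roughly like this in isolation (outbuf/append are an illustrative sketch, not the e2fsprogs code):

    #include <stdlib.h>
    #include <string.h>

    struct outbuf {
            char  *buf;
            size_t len, cap;
    };

    /* Grow the buffer. Every pointer previously derived from ob->buf may now
     * dangle and must be recomputed from a saved offset. */
    static int outbuf_grow(struct outbuf *ob, size_t extra)
    {
            char *nbuf = realloc(ob->buf, ob->cap + extra);
            if (!nbuf)
                    return -1;
            ob->buf = nbuf;
            ob->cap += extra;
            return 0;
    }

    static int append(struct outbuf *ob, const char *data, size_t n, char **cursor)
    {
            size_t cursor_off = (size_t)(*cursor - ob->buf); /* offset, not pointer */

            if (ob->len + n > ob->cap && outbuf_grow(ob, n) < 0)
                    return -1;

            *cursor = ob->buf + cursor_off;                  /* rebuild after realloc */
            memcpy(ob->buf + ob->len, data, n);
            ob->len += n;
            return 0;
    }

    int main(void)
    {
            struct outbuf ob = { malloc(8), 0, 8 };
            char *cursor = ob.buf;                 /* points into the buffer */

            if (!ob.buf || append(&ob, "0123456789", 10, &cursor) < 0)
                    return 1;                      /* append forces a realloc */
            free(ob.buf);
            return 0;
    }
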
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb
index 14c05a446c..f81defb837 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.3.bb
@@ -6,6 +6,9 @@ SRC_URI += "file://remove.ldconfig.call.patch \
file://mkdir_p.patch \
file://0001-misc-create_inode.c-set-dir-s-mode-correctly.patch \
file://CVE-2019-5094.patch \
+ file://CVE-2019-5188.patch \
+ file://0001-e2fsck-don-t-try-to-rehash-a-deleted-directory.patch \
+ file://e2fsck-fix-use-after-free-in-calculate_tree.patch \
"
SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch \
diff --git a/meta/recipes-devtools/file/file_5.37.bb b/meta/recipes-devtools/file/file_5.37.bb
index 60fc66131e..eb0f40b54d 100644
--- a/meta/recipes-devtools/file/file_5.37.bb
+++ b/meta/recipes-devtools/file/file_5.37.bb
@@ -9,7 +9,7 @@ LICENSE = "BSD"
LIC_FILES_CHKSUM = "file://COPYING;beginline=2;md5=0251eaec1188b20d9a72c502ecfdda1b"
DEPENDS = "zlib file-replacement-native"
-DEPENDS_class-native = "zlib-native"
+DEPENDS_class-native = "zlib-native bzip2-replacement-native"
# Blacklist a bogus tag in upstream check
UPSTREAM_CHECK_GITTAGREGEX = "FILE(?P<pver>(?!6_23).+)"
diff --git a/meta/recipes-devtools/gcc/gcc-9.2.inc b/meta/recipes-devtools/gcc/gcc-9.2.inc
index c6395998d5..4f068231f3 100644
--- a/meta/recipes-devtools/gcc/gcc-9.2.inc
+++ b/meta/recipes-devtools/gcc/gcc-9.2.inc
@@ -68,6 +68,7 @@ SRC_URI = "\
file://CVE-2019-15847_1.patch \
file://CVE-2019-15847_2.patch \
file://CVE-2019-15847_3.patch \
+ file://re-PR-target-91102-aarch64-ICE-on-Linux-kernel-with-.patch \
"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
SRC_URI[md5sum] = "3818ad8600447f05349098232c2ddc78"
diff --git a/meta/recipes-devtools/gcc/gcc-9.2/re-PR-target-91102-aarch64-ICE-on-Linux-kernel-with-.patch b/meta/recipes-devtools/gcc/gcc-9.2/re-PR-target-91102-aarch64-ICE-on-Linux-kernel-with-.patch
new file mode 100644
index 0000000000..c37e0bb9dd
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc-9.2/re-PR-target-91102-aarch64-ICE-on-Linux-kernel-with-.patch
@@ -0,0 +1,95 @@
+From efb0ee06f5c0186c2d1442ecd4dbbd55dbd97b44 Mon Sep 17 00:00:00 2001
+From: Vladimir Makarov <vmakarov@redhat.com>
+Date: Wed, 10 Jul 2019 16:07:10 +0000
+Subject: [PATCH] re PR target/91102 (aarch64 ICE on Linux kernel with -Os
+ starting with r270266)
+
+2019-07-10 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR target/91102
+ * lra-constraints.c (process_alt_operands): Don't match user
+ defined regs only if they are early clobbers.
+
+2019-07-10 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR target/91102
+ * gcc.target/aarch64/pr91102.c: New test.
+
+From-SVN: r273357
+Upstream-Status: Backport [https://github.com/gcc-mirror/gcc/commit/613caed2feb9cfc8158308670b59df3d031ec629]
+[takondra: dropped conflicting ChangeLog changes]
+Signed-off-by: Taras Kondratiuk <takondra@cisco.com>
+---
+ gcc/lra-constraints.c | 17 ++++++++++----
+ gcc/testsuite/gcc.target/aarch64/pr91102.c | 26 ++++++++++++++++++++++
+ 2 files changed, 39 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/pr91102.c
+
+diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
+index cf33da8013e4..6382dbf852b6 100644
+--- a/gcc/lra-constraints.c
++++ b/gcc/lra-constraints.c
+@@ -2172,8 +2172,9 @@ process_alt_operands (int only_alternative)
+ else
+ {
+ /* Operands don't match. If the operands are
+- different user defined explicit hard registers,
+- then we cannot make them match. */
++ different user defined explicit hard
++ registers, then we cannot make them match
++ when one is early clobber operand. */
+ if ((REG_P (*curr_id->operand_loc[nop])
+ || SUBREG_P (*curr_id->operand_loc[nop]))
+ && (REG_P (*curr_id->operand_loc[m])
+@@ -2192,9 +2193,17 @@ process_alt_operands (int only_alternative)
+ && REG_P (m_reg)
+ && HARD_REGISTER_P (m_reg)
+ && REG_USERVAR_P (m_reg))
+- break;
++ {
++ int i;
++
++ for (i = 0; i < early_clobbered_regs_num; i++)
++ if (m == early_clobbered_nops[i])
++ break;
++ if (i < early_clobbered_regs_num
++ || early_clobber_p)
++ break;
++ }
+ }
+-
+ /* Both operands must allow a reload register,
+ otherwise we cannot make them match. */
+ if (curr_alt[m] == NO_REGS)
+diff --git a/gcc/testsuite/gcc.target/aarch64/pr91102.c b/gcc/testsuite/gcc.target/aarch64/pr91102.c
+new file mode 100644
+index 000000000000..70b99045a48e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/pr91102.c
+@@ -0,0 +1,26 @@
++/* PR target/91102 */
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++int
++foo (long d, long l)
++{
++ register long e asm ("x1") = d;
++ register long f asm("x2") = l;
++ asm ("" : : "r" (e), "r" (f));
++ return 3;
++}
++
++struct T { int i; int j; };
++union S { long h; struct T t; };
++
++void
++bar (union S b)
++{
++ while (1)
++ {
++ union S c = b;
++ c.t.j++;
++ b.h = foo (b.h, c.h);
++ }
++}
diff --git a/meta/recipes-devtools/gcc/gcc-cross-canadian.inc b/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
index f14cbf7152..4aac345bec 100644
--- a/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
+++ b/meta/recipes-devtools/gcc/gcc-cross-canadian.inc
@@ -158,7 +158,7 @@ SYSTEMLIBS1 = "${target_libdir}/"
EXTRA_OECONF += "--enable-poison-system-directories"
EXTRA_OECONF_remove_elf = "--with-sysroot=/not/exist"
EXTRA_OECONF_remove_eabi = "--with-sysroot=/not/exist"
-EXTRA_OECONF_append_elf = "--without-headers --with-newlib"
-EXTRA_OECONF_append_eabi = "--without-headers --with-newlib"
+EXTRA_OECONF_append_elf = " --without-headers --with-newlib"
+EXTRA_OECONF_append_eabi = " --without-headers --with-newlib"
# gcc 4.7 needs -isystem
export ARCH_FLAGS_FOR_TARGET = "--sysroot=${STAGING_DIR_TARGET} -isystem=${target_includedir}"
diff --git a/meta/recipes-devtools/gcc/gcc-cross.inc b/meta/recipes-devtools/gcc/gcc-cross.inc
index 8855bb1f34..06ba3ccd15 100644
--- a/meta/recipes-devtools/gcc/gcc-cross.inc
+++ b/meta/recipes-devtools/gcc/gcc-cross.inc
@@ -61,6 +61,13 @@ do_compile () {
export CXXFLAGS_FOR_TARGET="${TARGET_CXXFLAGS}"
export LDFLAGS_FOR_TARGET="${TARGET_LDFLAGS}"
+ # Prevent native/host sysroot path from being used in configargs.h header,
+ # as it will be rewritten when used by other sysroots, preventing support
+ # for gcc plugins
+ oe_runmake configure-gcc
+ sed -i 's@${STAGING_DIR_TARGET}@/host@g' ${B}/gcc/configargs.h
+ sed -i 's@${STAGING_DIR_HOST}@/host@g' ${B}/gcc/configargs.h
+
oe_runmake all-host configure-target-libgcc
(cd ${B}/${TARGET_SYS}/libgcc; oe_runmake enable-execute-stack.c unwind.h md-unwind-support.h sfp-machine.h gthr-default.h)
# now generate script to drive testing
diff --git a/meta/recipes-devtools/gcc/gcc-runtime.inc b/meta/recipes-devtools/gcc/gcc-runtime.inc
index 2da3c02ef0..536b18d97f 100644
--- a/meta/recipes-devtools/gcc/gcc-runtime.inc
+++ b/meta/recipes-devtools/gcc/gcc-runtime.inc
@@ -302,10 +302,6 @@ do_check() {
# HACK: this works around the configure setting CXX with -nostd* args
sed -i 's/-nostdinc++ -nostdlib++//g' $(find ${B} -name testsuite_flags | head -1)
- # HACK: this works around the de-stashing changes to configargs.h, as well as recipe-sysroot changing the content
- sed -i '/static const char configuration_arguments/d' ${B}/gcc/configargs.h
- ${CC} -v 2>&1 | grep "^Configured with:" | \
- sed 's/Configured with: \(.*\)/static const char configuration_arguments[] = "\1";/g' >> ${B}/gcc/configargs.h
if [ "${TOOLCHAIN_TEST_TARGET}" = "user" ]; then
# qemu user has issues allocating large amounts of memory
diff --git a/meta/recipes-devtools/gcc/gcc-target.inc b/meta/recipes-devtools/gcc/gcc-target.inc
index bdc6ff658f..987e88d32c 100644
--- a/meta/recipes-devtools/gcc/gcc-target.inc
+++ b/meta/recipes-devtools/gcc/gcc-target.inc
@@ -137,6 +137,14 @@ FILES_${PN}-doc = "\
"
do_compile () {
+ # Prevent full target sysroot path from being used in configargs.h header,
+ # as it will be rewritten when used by other sysroots, preventing support
+ # for gcc plugins. Additionally, the path is embedded into the output
+ # binary, which prevents building a reproducible binary.
+ oe_runmake configure-gcc
+ sed -i 's@${STAGING_DIR_TARGET}@/@g' ${B}/gcc/configargs.h
+ sed -i 's@${STAGING_DIR_HOST}@/@g' ${B}/gcc/configargs.h
+
oe_runmake all-host
}
diff --git a/meta/recipes-devtools/git/git.inc b/meta/recipes-devtools/git/git.inc
index 6e137432f0..a0ce1626a1 100644
--- a/meta/recipes-devtools/git/git.inc
+++ b/meta/recipes-devtools/git/git.inc
@@ -7,7 +7,21 @@ DEPENDS = "openssl curl zlib expat"
PROVIDES_append_class-native = " git-replacement-native"
SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
- ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages"
+ ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages \
+ file://CVE-2020-5260.patch \
+ file://0001-t-lib-credential-use-test_i18ncmp-to-check-stderr.patch \
+ file://0002-credential-detect-unrepresentable-values-when-parsin.patch \
+ file://0003-fsck-detect-gitmodules-URLs-with-embedded-newlines.patch \
+ file://CVE-2020-11008-1.patch \
+ file://CVE-2020-11008-2.patch \
+ file://CVE-2020-11008-3.patch \
+ file://CVE-2020-11008-4.patch \
+ file://CVE-2020-11008-5.patch \
+ file://CVE-2020-11008-6.patch \
+ file://CVE-2020-11008-7.patch \
+ file://CVE-2020-11008-8.patch \
+ file://CVE-2020-11008-9.patch \
+ "
S = "${WORKDIR}/git-${PV}"
diff --git a/meta/recipes-devtools/git/git/0001-t-lib-credential-use-test_i18ncmp-to-check-stderr.patch b/meta/recipes-devtools/git/git/0001-t-lib-credential-use-test_i18ncmp-to-check-stderr.patch
new file mode 100644
index 0000000000..6eb3c16aef
--- /dev/null
+++ b/meta/recipes-devtools/git/git/0001-t-lib-credential-use-test_i18ncmp-to-check-stderr.patch
@@ -0,0 +1,35 @@
+From 70ef9c6ce884b2d466d3d36563f1d2aa31b56443 Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Wed, 11 Mar 2020 18:11:37 -0400
+Subject: [PATCH 01/12] t/lib-credential: use test_i18ncmp to check stderr
+
+The credential tests have a "check" function which feeds some input to
+git-credential and checks the stdout and stderr. We look for exact
+matches in the output. For stdout, this makes sense; the output is
+the credential protocol. But for stderr, we may be showing various
+diagnostic messages, or the prompts fed to the askpass program, which
+could be translated. Let's mark them as such.
+
+Upstream-Status: Backport
+
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ t/lib-credential.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/t/lib-credential.sh b/t/lib-credential.sh
+index 937b831..bb88cc0 100755
+--- a/t/lib-credential.sh
++++ b/t/lib-credential.sh
+@@ -19,7 +19,7 @@ check() {
+ false
+ fi &&
+ test_cmp expect-stdout stdout &&
+- test_cmp expect-stderr stderr
++ test_i18ncmp expect-stderr stderr
+ }
+
+ read_chunk() {
+--
+1.9.1
+
diff --git a/meta/recipes-devtools/git/git/0002-credential-detect-unrepresentable-values-when-parsin.patch b/meta/recipes-devtools/git/git/0002-credential-detect-unrepresentable-values-when-parsin.patch
new file mode 100644
index 0000000000..a9b7348ef7
--- /dev/null
+++ b/meta/recipes-devtools/git/git/0002-credential-detect-unrepresentable-values-when-parsin.patch
@@ -0,0 +1,156 @@
+From 43803880b954a020dbffa5250a5b7fd893442c7c Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Thu, 12 Mar 2020 01:31:11 -0400
+Subject: [PATCH 02/12] credential: detect unrepresentable values when parsing
+ urls
+
+The credential protocol can't represent newlines in values, but URLs can
+embed percent-encoded newlines in various components. A previous commit
+taught the low-level writing routines to die() when encountering this,
+but we can be a little friendlier to the user by detecting them earlier
+and handling them gracefully.
+
+This patch teaches credential_from_url() to notice such components,
+issue a warning, and blank the credential (which will generally result
+in prompting the user for a username and password). We blank the whole
+credential in this case. Another option would be to blank only the
+invalid component. However, we're probably better off not feeding a
+partially-parsed URL result to a credential helper. We don't know how a
+given helper would handle it, so we're better off to err on the side of
+matching nothing rather than something unexpected.
+
+The die() call in credential_write() is _probably_ impossible to reach
+after this patch. Values should end up in credential structs only by URL
+parsing (which is covered here), or by reading credential protocol input
+(which by definition cannot read a newline into a value). But we should
+definitely keep the low-level check, as it's our final and most accurate
+line of defense against protocol injection attacks. Arguably it could
+become a BUG(), but it probably doesn't matter much either way.
+
+Note that the public interface of credential_from_url() grows a little
+more than we need here. We'll use the extra flexibility in a future
+patch to help fsck catch these cases.
+
+Upstream-Status: Backport
+
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 36 ++++++++++++++++++++++++++++++++++--
+ credential.h | 16 ++++++++++++++++
+ t/t0300-credentials.sh | 12 ++++++++++--
+ 3 files changed, 60 insertions(+), 4 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index a79aff0..2482382 100644
+--- a/credential.c
++++ b/credential.c
+@@ -324,7 +324,22 @@ void credential_reject(struct credential *c)
+ c->approved = 0;
+ }
+
+-void credential_from_url(struct credential *c, const char *url)
++static int check_url_component(const char *url, int quiet,
++ const char *name, const char *value)
++{
++ if (!value)
++ return 0;
++ if (!strchr(value, '\n'))
++ return 0;
++
++ if (!quiet)
++ warning(_("url contains a newline in its %s component: %s"),
++ name, url);
++ return -1;
++}
++
++int credential_from_url_gently(struct credential *c, const char *url,
++ int quiet)
+ {
+ const char *at, *colon, *cp, *slash, *host, *proto_end;
+
+@@ -338,7 +353,7 @@ void credential_from_url(struct credential *c, const char *url)
+ */
+ proto_end = strstr(url, "://");
+ if (!proto_end)
+- return;
++ return 0;
+ cp = proto_end + 3;
+ at = strchr(cp, '@');
+ colon = strchr(cp, ':');
+@@ -373,4 +388,21 @@ void credential_from_url(struct credential *c, const char *url)
+ while (p > c->path && *p == '/')
+ *p-- = '\0';
+ }
++
++ if (check_url_component(url, quiet, "username", c->username) < 0 ||
++ check_url_component(url, quiet, "password", c->password) < 0 ||
++ check_url_component(url, quiet, "protocol", c->protocol) < 0 ||
++ check_url_component(url, quiet, "host", c->host) < 0 ||
++ check_url_component(url, quiet, "path", c->path) < 0)
++ return -1;
++
++ return 0;
++}
++
++void credential_from_url(struct credential *c, const char *url)
++{
++ if (credential_from_url_gently(c, url, 0) < 0) {
++ warning(_("skipping credential lookup for url: %s"), url);
++ credential_clear(c);
++ }
+ }
+diff --git a/credential.h b/credential.h
+index 6b0cd16..122a23c 100644
+--- a/credential.h
++++ b/credential.h
+@@ -28,7 +28,23 @@ struct credential {
+
+ int credential_read(struct credential *, FILE *);
+ void credential_write(const struct credential *, FILE *);
++
++/*
++ * Parse a url into a credential struct, replacing any existing contents.
++ *
++ * If the url can't be parsed (e.g., a missing "proto://" component), the
++ * resulting credential will be empty but we'll still return success from the
++ * "gently" form.
++ *
++ * If we encounter a component which cannot be represented as a credential
++ * value (e.g., because it contains a newline), the "gently" form will return
++ * an error but leave the broken state in the credential object for further
++ * examination. The non-gentle form will issue a warning to stderr and return
++ * an empty credential.
++ */
+ void credential_from_url(struct credential *, const char *url);
++int credential_from_url_gently(struct credential *, const char *url, int quiet);
++
+ int credential_match(const struct credential *have,
+ const struct credential *want);
+
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index 26f3c3a..b9c0f1f 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -308,9 +308,17 @@ test_expect_success 'empty helper spec resets helper list' '
+ EOF
+ '
+
+-test_expect_success 'url parser rejects embedded newlines' '
+- test_must_fail git credential fill <<-\EOF
++test_expect_success 'url parser ignores embedded newlines' '
++ check fill <<-EOF
+ url=https://one.example.com?%0ahost=two.example.com/
++ --
++ username=askpass-username
++ password=askpass-password
++ --
++ warning: url contains a newline in its host component: https://one.example.com?%0ahost=two.example.com/
++ warning: skipping credential lookup for url: https://one.example.com?%0ahost=two.example.com/
++ askpass: Username:
++ askpass: Password:
+ EOF
+ '
+
+--
+1.9.1
+
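
The defensive rule this backport enforces is simple: no credential field may contain a newline, because the helper protocol is line-oriented key=value text. Decoupled from git's credential struct, the check reduces to something like the sketch below (check_components is illustrative; the URL and the decoded host value are taken from the patch's test):

    #include <stdio.h>
    #include <string.h>

    /* Return 0 if every component is safe for a line-oriented key=value
     * protocol, -1 (after warning) if any component embeds a newline. */
    static int check_components(const char *url, const char *names[],
                                const char *values[], int n)
    {
            for (int i = 0; i < n; i++) {
                    if (values[i] && strchr(values[i], '\n')) {
                            fprintf(stderr,
                                    "warning: url contains a newline in its %s component: %s\n",
                                    names[i], url);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            /* After percent-decoding, the host component of the test URL
             * "https://one.example.com?%0ahost=two.example.com/" contains '\n'. */
            const char *names[]  = { "protocol", "host" };
            const char *values[] = { "https",
                                     "one.example.com?\nhost=two.example.com" };

            return check_components("https://one.example.com?%0ahost=two.example.com/",
                                    names, values, 2) ? 1 : 0;
    }
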
diff --git a/meta/recipes-devtools/git/git/0003-fsck-detect-gitmodules-URLs-with-embedded-newlines.patch b/meta/recipes-devtools/git/git/0003-fsck-detect-gitmodules-URLs-with-embedded-newlines.patch
new file mode 100644
index 0000000000..23931e6313
--- /dev/null
+++ b/meta/recipes-devtools/git/git/0003-fsck-detect-gitmodules-URLs-with-embedded-newlines.patch
@@ -0,0 +1,103 @@
+From 1c9f8cedd34302575db40016231bdf502f17901e Mon Sep 17 00:00:00 2001
+From: Li Zhou <li.zhou@windriver.com>
+Date: Mon, 27 Apr 2020 13:49:39 +0800
+Subject: [PATCH 03/12] fsck: detect gitmodules URLs with embedded newlines
+
+The credential protocol can't handle values with newlines. We already
+detect and block any such URLs from being used with credential helpers,
+but let's also add an fsck check to detect and block gitmodules files
+with such URLs. That will let us notice the problem earlier when
+transfer.fsckObjects is turned on. And in particular it will prevent bad
+objects from spreading, which may protect downstream users running older
+versions of Git.
+
+We'll file this under the existing gitmodulesUrl flag, which covers URLs
+with option injection. There's really no need to distinguish the exact
+flaw in the URL in this context. Likewise, I've expanded the description
+of t7416 to cover all types of bogus URLs.
+
+Upstream-Status: Backport
+
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ fsck.c | 16 +++++++++++++++-
+ t/t7416-submodule-dash-url.sh | 18 +++++++++++++++++-
+ 2 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/fsck.c b/fsck.c
+index ef8b343..ea46eea 100644
+--- a/fsck.c
++++ b/fsck.c
+@@ -15,6 +15,7 @@
+ #include "packfile.h"
+ #include "submodule-config.h"
+ #include "config.h"
++#include "credential.h"
+ #include "help.h"
+
+ static struct oidset gitmodules_found = OIDSET_INIT;
+@@ -947,6 +948,19 @@ static int fsck_tag(struct tag *tag, const char *data,
+ return fsck_tag_buffer(tag, data, size, options);
+ }
+
++static int check_submodule_url(const char *url)
++{
++ struct credential c = CREDENTIAL_INIT;
++ int ret;
++
++ if (looks_like_command_line_option(url))
++ return -1;
++
++ ret = credential_from_url_gently(&c, url, 1);
++ credential_clear(&c);
++ return ret;
++}
++
+ struct fsck_gitmodules_data {
+ struct object *obj;
+ struct fsck_options *options;
+@@ -971,7 +985,7 @@ static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata)
+ "disallowed submodule name: %s",
+ name);
+ if (!strcmp(key, "url") && value &&
+- looks_like_command_line_option(value))
++ check_submodule_url(value) < 0)
+ data->ret |= report(data->options, data->obj,
+ FSCK_MSG_GITMODULES_URL,
+ "disallowed submodule url: %s",
+diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
+index 5ba041f..41431b1 100755
+--- a/t/t7416-submodule-dash-url.sh
++++ b/t/t7416-submodule-dash-url.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-test_description='check handling of .gitmodule url with dash'
++test_description='check handling of disallowed .gitmodule urls'
+ . ./test-lib.sh
+
+ test_expect_success 'create submodule with protected dash in url' '
+@@ -60,4 +60,20 @@ test_expect_success 'trailing backslash is handled correctly' '
+ test_i18ngrep ! "unknown option" err
+ '
+
++test_expect_success 'fsck rejects embedded newline in url' '
++ # create an orphan branch to avoid existing .gitmodules objects
++ git checkout --orphan newline &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = "https://one.example.com?%0ahost=two.example.com/foo.git"
++ EOF
++ git add .gitmodules &&
++ git commit -m "gitmodules with newline" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
+ test_done
+--
+1.9.1
+
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-1.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-1.patch
new file mode 100644
index 0000000000..9cf98ea7b4
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-1.patch
@@ -0,0 +1,70 @@
+From 863f8067d8b4012904ca3bb881c659ac9894df97 Mon Sep 17 00:00:00 2001
+From: Li Zhou <li.zhou@windriver.com>
+Date: Mon, 27 Apr 2020 14:36:03 +0800
+Subject: [PATCH 04/12] t0300: make "quit" helper more realistic
+
+We test a toy credential helper that writes "quit=1" and confirms that
+we stop running other helpers. However, that helper is unrealistic in
+that it does not bother to read its stdin at all.
+
+For now we don't send any input to it, because we feed git-credential a
+blank credential. But that will change in the next patch, which will
+cause this test to racily fail, as git-credential will get SIGPIPE
+writing to the helper rather than exiting because it was asked to.
+
+Let's make this one-off helper more like our other sample helpers, and
+have it source the "dump" script. That will read stdin, fixing the
+SIGPIPE problem. But it will also write what it sees to stderr. We can
+make the test more robust by checking that output, which confirms that
+we do run the quit helper, don't run any other helpers, and exit for the
+reason we expected.
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (1)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ t/t0300-credentials.sh | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index b9c0f1f..0206b3b 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -22,6 +22,11 @@ test_expect_success 'setup helper scripts' '
+ exit 0
+ EOF
+
++ write_script git-credential-quit <<-\EOF &&
++ . ./dump
++ echo quit=1
++ EOF
++
+ write_script git-credential-verbatim <<-\EOF &&
+ user=$1; shift
+ pass=$1; shift
+@@ -291,10 +296,16 @@ test_expect_success 'http paths can be part of context' '
+
+ test_expect_success 'helpers can abort the process' '
+ test_must_fail git \
+- -c credential.helper="!f() { echo quit=1; }; f" \
++ -c credential.helper=quit \
+ -c credential.helper="verbatim foo bar" \
+- credential fill >stdout &&
+- test_must_be_empty stdout
++ credential fill >stdout 2>stderr &&
++ >expect &&
++ test_cmp expect stdout &&
++ cat >expect <<-\EOF &&
++ quit: get
++ fatal: credential helper '\''quit'\'' told us to quit
++ EOF
++ test_i18ncmp expect stderr
+ '
+
+ test_expect_success 'empty helper spec resets helper list' '
+--
+1.9.1
+
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-2.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-2.patch
new file mode 100644
index 0000000000..c752e3d431
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-2.patch
@@ -0,0 +1,292 @@
+From 5588659069214aa0f7fea75a69687078e2f7a817 Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Sat, 18 Apr 2020 20:47:30 -0700
+Subject: [PATCH 05/12] t0300: use more realistic inputs
+
+Many of the tests in t0300 give partial inputs to git-credential,
+omitting a protocol or hostname. We're checking only high-level things
+like whether and how helpers are invoked at all, and we don't care about
+specific hosts. However, in preparation for tightening up the rules
+about when we're willing to run a helper, let's start using input that's
+a bit more realistic: pretend as if http://example.com is being
+examined.
+
+This shouldn't change the point of any of the tests, but do note we have
+to adjust the expected output to accommodate this (filling a credential
+will repeat back the protocol/host fields to stdout, and the helper
+debug messages and askpass prompt will change on stderr).
+
+Signed-off-by: Jeff King <peff@peff.net>
+Reviewed-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (2)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ t/t0300-credentials.sh | 89 +++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 85 insertions(+), 4 deletions(-)
+
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index 0206b3b..f4c5d7f 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -40,43 +40,71 @@ test_expect_success 'setup helper scripts' '
+
+ test_expect_success 'credential_fill invokes helper' '
+ check fill "verbatim foo bar" <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=foo
+ password=bar
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ EOF
+ '
+
+ test_expect_success 'credential_fill invokes multiple helpers' '
+ check fill useless "verbatim foo bar" <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=foo
+ password=bar
+ --
+ useless: get
++ useless: protocol=http
++ useless: host=example.com
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ EOF
+ '
+
+ test_expect_success 'credential_fill stops when we get a full response' '
+ check fill "verbatim one two" "verbatim three four" <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=one
+ password=two
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ EOF
+ '
+
+ test_expect_success 'credential_fill continues through partial response' '
+ check fill "verbatim one \"\"" "verbatim two three" <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=two
+ password=three
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: username=one
+ EOF
+ '
+@@ -102,14 +130,20 @@ test_expect_success 'credential_fill passes along metadata' '
+
+ test_expect_success 'credential_approve calls all helpers' '
+ check approve useless "verbatim one two" <<-\EOF
++ protocol=http
++ host=example.com
+ username=foo
+ password=bar
+ --
+ --
+ useless: store
++ useless: protocol=http
++ useless: host=example.com
+ useless: username=foo
+ useless: password=bar
+ verbatim: store
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: username=foo
+ verbatim: password=bar
+ EOF
+@@ -117,6 +151,8 @@ test_expect_success 'credential_approve calls all helpers' '
+
+ test_expect_success 'do not bother storing password-less credential' '
+ check approve useless <<-\EOF
++ protocol=http
++ host=example.com
+ username=foo
+ --
+ --
+@@ -126,14 +162,20 @@ test_expect_success 'do not bother storing password-less credential' '
+
+ test_expect_success 'credential_reject calls all helpers' '
+ check reject useless "verbatim one two" <<-\EOF
++ protocol=http
++ host=example.com
+ username=foo
+ password=bar
+ --
+ --
+ useless: erase
++ useless: protocol=http
++ useless: host=example.com
+ useless: username=foo
+ useless: password=bar
+ verbatim: erase
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: username=foo
+ verbatim: password=bar
+ EOF
+@@ -141,33 +183,49 @@ test_expect_success 'credential_reject calls all helpers' '
+
+ test_expect_success 'usernames can be preserved' '
+ check fill "verbatim \"\" three" <<-\EOF
++ protocol=http
++ host=example.com
+ username=one
+ --
++ protocol=http
++ host=example.com
+ username=one
+ password=three
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: username=one
+ EOF
+ '
+
+ test_expect_success 'usernames can be overridden' '
+ check fill "verbatim two three" <<-\EOF
++ protocol=http
++ host=example.com
+ username=one
+ --
++ protocol=http
++ host=example.com
+ username=two
+ password=three
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ verbatim: username=one
+ EOF
+ '
+
+ test_expect_success 'do not bother completing already-full credential' '
+ check fill "verbatim three four" <<-\EOF
++ protocol=http
++ host=example.com
+ username=one
+ password=two
+ --
++ protocol=http
++ host=example.com
+ username=one
+ password=two
+ --
+@@ -179,23 +237,31 @@ test_expect_success 'do not bother completing already-full credential' '
+ # askpass helper is run, we know the internal getpass is working.
+ test_expect_success 'empty helper list falls back to internal getpass' '
+ check fill <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=askpass-username
+ password=askpass-password
+ --
+- askpass: Username:
+- askpass: Password:
++ askpass: Username for '\''http://example.com'\'':
++ askpass: Password for '\''http://askpass-username@example.com'\'':
+ EOF
+ '
+
+ test_expect_success 'internal getpass does not ask for known username' '
+ check fill <<-\EOF
++ protocol=http
++ host=example.com
+ username=foo
+ --
++ protocol=http
++ host=example.com
+ username=foo
+ password=askpass-password
+ --
+- askpass: Password:
++ askpass: Password for '\''http://foo@example.com'\'':
+ EOF
+ '
+
+@@ -207,7 +273,11 @@ HELPER="!f() {
+ test_expect_success 'respect configured credentials' '
+ test_config credential.helper "$HELPER" &&
+ check fill <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=foo
+ password=bar
+ --
+@@ -298,11 +368,16 @@ test_expect_success 'helpers can abort the process' '
+ test_must_fail git \
+ -c credential.helper=quit \
+ -c credential.helper="verbatim foo bar" \
+- credential fill >stdout 2>stderr &&
++ credential fill >stdout 2>stderr <<-\EOF &&
++ protocol=http
++ host=example.com
++ EOF
+ >expect &&
+ test_cmp expect stdout &&
+ cat >expect <<-\EOF &&
+ quit: get
++ quit: protocol=http
++ quit: host=example.com
+ fatal: credential helper '\''quit'\'' told us to quit
+ EOF
+ test_i18ncmp expect stderr
+@@ -311,11 +386,17 @@ test_expect_success 'helpers can abort the process' '
+ test_expect_success 'empty helper spec resets helper list' '
+ test_config credential.helper "verbatim file file" &&
+ check fill "" "verbatim cmdline cmdline" <<-\EOF
++ protocol=http
++ host=example.com
+ --
++ protocol=http
++ host=example.com
+ username=cmdline
+ password=cmdline
+ --
+ verbatim: get
++ verbatim: protocol=http
++ verbatim: host=example.com
+ EOF
+ '
+
+--
+1.9.1
+
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-3.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-3.patch
new file mode 100644
index 0000000000..c17e883d6c
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-3.patch
@@ -0,0 +1,97 @@
+From 22f28251ae575dd7a60f7a46853469025d004ca7 Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Sat, 18 Apr 2020 20:48:05 -0700
+Subject: [PATCH 06/12] credential: parse URL without host as empty host, not
+ unset
+
+We may feed a URL like "cert:///path/to/cert.pem" into the credential
+machinery to get the key for a client-side certificate. That
+credential has no hostname field, which is about to be disallowed (to
+avoid confusion with protocols where a helper _would_ expect a
+hostname).
+
+This means as of the next patch, credential helpers won't work for
+unlocking certs. Let's fix that by doing two things:
+
+ - when we parse a url with an empty host, set the host field to the
+ empty string (asking only to match stored entries with an empty
+ host) rather than NULL (asking to match _any_ host).
+
+ - when we build a cert:// credential by hand, similarly assign an
+ empty string
+
+It's the latter that is more likely to impact real users in practice,
+since it's what's used for http connections. But we don't have good
+infrastructure to test it.
+
+The url-parsing version will help anybody using git-credential in a
+script, and is easy to test.
+
+Signed-off-by: Jeff King <peff@peff.net>
+Reviewed-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (3)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 3 +--
+ http.c | 1 +
+ t/t0300-credentials.sh | 17 +++++++++++++++++
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index 2482382..f2413ce 100644
+--- a/credential.c
++++ b/credential.c
+@@ -376,8 +376,7 @@ int credential_from_url_gently(struct credential *c, const char *url,
+
+ if (proto_end - url > 0)
+ c->protocol = xmemdupz(url, proto_end - url);
+- if (slash - host > 0)
+- c->host = url_decode_mem(host, slash - host);
++ c->host = url_decode_mem(host, slash - host);
+ /* Trim leading and trailing slashes from path */
+ while (*slash == '/')
+ slash++;
+diff --git a/http.c b/http.c
+index 27aa0a3..c4dfdac 100644
+--- a/http.c
++++ b/http.c
+@@ -558,6 +558,7 @@ static int has_cert_password(void)
+ return 0;
+ if (!cert_auth.password) {
+ cert_auth.protocol = xstrdup("cert");
++ cert_auth.host = xstrdup("");
+ cert_auth.username = xstrdup("");
+ cert_auth.path = xstrdup(ssl_cert);
+ credential_fill(&cert_auth);
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index f4c5d7f..1c1010b 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -414,4 +414,21 @@ test_expect_success 'url parser ignores embedded newlines' '
+ EOF
+ '
+
++test_expect_success 'host-less URLs are parsed as empty host' '
++ check fill "verbatim foo bar" <<-\EOF
++ url=cert:///path/to/cert.pem
++ --
++ protocol=cert
++ host=
++ path=path/to/cert.pem
++ username=foo
++ password=bar
++ --
++ verbatim: get
++ verbatim: protocol=cert
++ verbatim: host=
++ verbatim: path=path/to/cert.pem
++ EOF
++'
++
+ test_done
+--
+1.9.1
+
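
The subtle point in this patch is the difference between an unset host and an empty host when credentials are matched: NULL behaves as a wildcard, while an empty string only matches stored entries whose host is itself empty, which is why cert:// URLs now get host set to "". A toy illustration of that matching rule (host_matches is hypothetical, not git's credential_match):

    #include <stdio.h>
    #include <string.h>

    /* NULL acts as "match any host"; "" matches only an empty host. */
    static int host_matches(const char *want, const char *have)
    {
            if (want == NULL)
                    return 1;                      /* wildcard */
            return have != NULL && strcmp(want, have) == 0;
    }

    int main(void)
    {
            printf("%d\n", host_matches(NULL, "example.com")); /* 1: wildcard */
            printf("%d\n", host_matches("", "example.com"));   /* 0 */
            printf("%d\n", host_matches("", ""));              /* 1 */
            return 0;
    }
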
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-4.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-4.patch
new file mode 100644
index 0000000000..14e23466d4
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-4.patch
@@ -0,0 +1,173 @@
+From f8bf7099379990ad974c1ca8f51e1f28bf18cf2a Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Sat, 18 Apr 2020 20:50:48 -0700
+Subject: [PATCH 07/12] credential: refuse to operate when missing host or
+ protocol
+
+The credential helper protocol was designed to be very flexible: the
+fields it takes as input are treated as a pattern, and any missing
+fields are taken as wildcards. This allows unusual things like:
+
+ echo protocol=https | git credential reject
+
+to delete all stored https credentials (assuming the helpers themselves
+treat the input that way). But when helpers are invoked automatically by
+Git, this flexibility works against us. If for whatever reason we don't
+have a "host" field, then we'd match _any_ host. When you're filling a
+credential to send to a remote server, this is almost certainly not what
+you want.
+
+Prevent this at the layer that writes to the credential helper. Add a
+check to the credential API that the host and protocol are always passed
+in, and add an assertion to the credential_write function that speaks
+credential helper protocol to be doubly sure.
+
+There are a few ways this can be triggered in practice:
+
+ - the "git credential" command passes along arbitrary credential
+ parameters it reads from stdin.
+
+ - until the previous patch, when the host field of a URL is empty, we
+ would leave it unset (rather than setting it to the empty string)
+
+ - a URL like "example.com/foo.git" is treated by curl as if "http://"
+ was present, but our parser sees it as a non-URL and leaves all
+ fields unset
+
+ - the recent fix for URLs with embedded newlines blanks the URL but
+ otherwise continues. Rather than having the desired effect of
+ looking up no credential at all, many helpers will return _any_
+ credential
+
+Our earlier test for an embedded newline didn't catch this because it
+only checked that the credential was cleared, but didn't configure an
+actual helper. Configuring the "verbatim" helper in the test would show
+that it is invoked (it's obviously a silly helper which doesn't look at
+its input, but the point is that it shouldn't be run at all). Since
+we're switching this case to die(), we don't need to bother with a
+helper. We can see the new behavior just by checking that the operation
+fails.
+
+We'll add new tests covering partial input as well (these can be
+triggered through various means with url-parsing, but it's simpler to
+just check them directly, as we know we are covered even if the url
+parser changes behavior in the future).
+
+[jn: changed to die() instead of logging and showing a manual
+ username/password prompt]
+
+Reported-by: Carlo Arenas <carenas@gmail.com>
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (4)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 20 ++++++++++++++------
+ t/t0300-credentials.sh | 34 ++++++++++++++++++++++++++--------
+ 2 files changed, 40 insertions(+), 14 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index f2413ce..e08ed84 100644
+--- a/credential.c
++++ b/credential.c
+@@ -89,6 +89,11 @@ static int proto_is_http(const char *s)
+
+ static void credential_apply_config(struct credential *c)
+ {
++ if (!c->host)
++ die(_("refusing to work with credential missing host field"));
++ if (!c->protocol)
++ die(_("refusing to work with credential missing protocol field"));
++
+ if (c->configured)
+ return;
+ git_config(credential_config_callback, c);
+@@ -191,8 +196,11 @@ int credential_read(struct credential *c, FILE *fp)
+ return 0;
+ }
+
+-static void credential_write_item(FILE *fp, const char *key, const char *value)
++static void credential_write_item(FILE *fp, const char *key, const char *value,
++ int required)
+ {
++ if (!value && required)
++ BUG("credential value for %s is missing", key);
+ if (!value)
+ return;
+ if (strchr(value, '\n'))
+@@ -202,11 +210,11 @@ static void credential_write_item(FILE *fp, const char *key, const char *value)
+
+ void credential_write(const struct credential *c, FILE *fp)
+ {
+- credential_write_item(fp, "protocol", c->protocol);
+- credential_write_item(fp, "host", c->host);
+- credential_write_item(fp, "path", c->path);
+- credential_write_item(fp, "username", c->username);
+- credential_write_item(fp, "password", c->password);
++ credential_write_item(fp, "protocol", c->protocol, 1);
++ credential_write_item(fp, "host", c->host, 1);
++ credential_write_item(fp, "path", c->path, 0);
++ credential_write_item(fp, "username", c->username, 0);
++ credential_write_item(fp, "password", c->password, 0);
+ }
+
+ static int run_credential_helper(struct credential *c,
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index 1c1010b..646f845 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -400,18 +400,16 @@ test_expect_success 'empty helper spec resets helper list' '
+ EOF
+ '
+
+-test_expect_success 'url parser ignores embedded newlines' '
+- check fill <<-EOF
++test_expect_success 'url parser rejects embedded newlines' '
++ test_must_fail git credential fill 2>stderr <<-\EOF &&
+ url=https://one.example.com?%0ahost=two.example.com/
+- --
+- username=askpass-username
+- password=askpass-password
+- --
++ EOF
++ cat >expect <<-\EOF &&
+ warning: url contains a newline in its host component: https://one.example.com?%0ahost=two.example.com/
+ warning: skipping credential lookup for url: https://one.example.com?%0ahost=two.example.com/
+- askpass: Username:
+- askpass: Password:
++ fatal: refusing to work with credential missing host field
+ EOF
++ test_i18ncmp expect stderr
+ '
+
+ test_expect_success 'host-less URLs are parsed as empty host' '
+@@ -431,4 +429,24 @@ test_expect_success 'host-less URLs are parsed as empty host' '
+ EOF
+ '
+
++test_expect_success 'credential system refuses to work with missing host' '
++ test_must_fail git credential fill 2>stderr <<-\EOF &&
++ protocol=http
++ EOF
++ cat >expect <<-\EOF &&
++ fatal: refusing to work with credential missing host field
++ EOF
++ test_i18ncmp expect stderr
++'
++
++test_expect_success 'credential system refuses to work with missing protocol' '
++ test_must_fail git credential fill 2>stderr <<-\EOF &&
++ host=example.com
++ EOF
++ cat >expect <<-\EOF &&
++ fatal: refusing to work with credential missing protocol field
++ EOF
++ test_i18ncmp expect stderr
++'
++
+ test_done
+--
+1.9.1
+
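
The enforcement added here has two layers: the high-level API dies if protocol or host is missing before any helper is consulted, and the low-level protocol writer treats those two keys as mandatory so a missing value can never silently widen the match. Stripped of git's types, the writer side of that split looks roughly like the sketch below (write_item/write_cred are illustrative, not the actual credential API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cred {
            const char *protocol, *host, *path, *username, *password;
    };

    static void write_item(FILE *fp, const char *key, const char *value, int required)
    {
            if (!value) {
                    if (required) {
                            /* A missing mandatory field would turn the query into
                             * a wildcard, so refuse loudly instead. */
                            fprintf(stderr, "BUG: credential value for %s is missing\n", key);
                            exit(1);
                    }
                    return;
            }
            if (strchr(value, '\n')) {
                    fprintf(stderr, "fatal: value for %s contains a newline\n", key);
                    exit(1);
            }
            fprintf(fp, "%s=%s\n", key, value);
    }

    static void write_cred(const struct cred *c, FILE *fp)
    {
            write_item(fp, "protocol", c->protocol, 1);   /* mandatory */
            write_item(fp, "host",     c->host,     1);   /* mandatory */
            write_item(fp, "path",     c->path,     0);
            write_item(fp, "username", c->username, 0);
            write_item(fp, "password", c->password, 0);
    }

    int main(void)
    {
            struct cred c = { "https", "example.com", NULL, "me", NULL };
            write_cred(&c, stdout);   /* prints protocol, host and username lines */
            return 0;
    }
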
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-5.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-5.patch
new file mode 100644
index 0000000000..60f8d59082
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-5.patch
@@ -0,0 +1,211 @@
+From 3431abe8c0f64f4049a31298c0b1056baa7d81dc Mon Sep 17 00:00:00 2001
+From: Li Zhou <li.zhou@windriver.com>
+Date: Mon, 27 Apr 2020 14:45:49 +0800
+Subject: [PATCH 08/12] fsck: convert gitmodules url to URL passed to curl
+
+In 07259e74ec1 (fsck: detect gitmodules URLs with embedded newlines,
+2020-03-11), git fsck learned to check whether URLs in .gitmodules could
+be understood by the credential machinery when they are handled by
+git-remote-curl.
+
+However, the check is overbroad: it checks all URLs instead of only
+URLs that would be passed to git-remote-curl. In principle a git:// or
+file:/// URL does not need to follow the same conventions as an http://
+URL; in particular, git:// and file:// protocols are not susceptible to
+issues in the credential API because they do not support attaching
+credentials.
+
+In the HTTP case, the URL in .gitmodules does not always match the URL
+that would be passed to git-remote-curl and the credential machinery:
+Git's URL syntax allows specifying a remote helper followed by a "::"
+delimiter and a URL to be passed to it, so that
+
+ git ls-remote http::https://example.com/repo.git
+
+invokes git-remote-http with https://example.com/repo.git as its URL
+argument. With today's checks, that distinction does not make a
+difference, but for a check we are about to introduce (for empty URL
+schemes) it will matter.
+
+.gitmodules files also support relative URLs. To ensure coverage for the
+https-based embedded-newline attack, URL-decode and check them directly
+for embedded newlines.
+
+Helped-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+Reviewed-by: Jeff King <peff@peff.net>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (5)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ fsck.c | 94 ++++++++++++++++++++++++++++++++++++++++---
+ t/t7416-submodule-dash-url.sh | 29 +++++++++++++
+ 2 files changed, 118 insertions(+), 5 deletions(-)
+
+diff --git a/fsck.c b/fsck.c
+index ea46eea..0f21eb1 100644
+--- a/fsck.c
++++ b/fsck.c
+@@ -9,6 +9,7 @@
+ #include "tag.h"
+ #include "fsck.h"
+ #include "refs.h"
++#include "url.h"
+ #include "utf8.h"
+ #include "decorate.h"
+ #include "oidset.h"
+@@ -948,17 +949,100 @@ static int fsck_tag(struct tag *tag, const char *data,
+ return fsck_tag_buffer(tag, data, size, options);
+ }
+
++/*
++ * Like builtin/submodule--helper.c's starts_with_dot_slash, but without
++ * relying on the platform-dependent is_dir_sep helper.
++ *
++ * This is for use in checking whether a submodule URL is interpreted as
++ * relative to the current directory on any platform, since \ is a
++ * directory separator on Windows but not on other platforms.
++ */
++static int starts_with_dot_slash(const char *str)
++{
++ return str[0] == '.' && (str[1] == '/' || str[1] == '\\');
++}
++
++/*
++ * Like starts_with_dot_slash, this is a variant of submodule--helper's
++ * helper of the same name with the twist that it accepts backslash as a
++ * directory separator even on non-Windows platforms.
++ */
++static int starts_with_dot_dot_slash(const char *str)
++{
++ return str[0] == '.' && starts_with_dot_slash(str + 1);
++}
++
++static int submodule_url_is_relative(const char *url)
++{
++ return starts_with_dot_slash(url) || starts_with_dot_dot_slash(url);
++}
++
++/*
++ * Check whether a transport is implemented by git-remote-curl.
++ *
++ * If it is, returns 1 and writes the URL that would be passed to
++ * git-remote-curl to the "out" parameter.
++ *
++ * Otherwise, returns 0 and leaves "out" untouched.
++ *
++ * Examples:
++ * http::https://example.com/repo.git -> 1, https://example.com/repo.git
++ * https://example.com/repo.git -> 1, https://example.com/repo.git
++ * git://example.com/repo.git -> 0
++ *
++ * This is for use in checking for previously exploitable bugs that
++ * required a submodule URL to be passed to git-remote-curl.
++ */
++static int url_to_curl_url(const char *url, const char **out)
++{
++ /*
++ * We don't need to check for case-aliases, "http.exe", and so
++ * on because in the default configuration, is_transport_allowed
++ * prevents URLs with those schemes from being cloned
++ * automatically.
++ */
++ if (skip_prefix(url, "http::", out) ||
++ skip_prefix(url, "https::", out) ||
++ skip_prefix(url, "ftp::", out) ||
++ skip_prefix(url, "ftps::", out))
++ return 1;
++ if (starts_with(url, "http://") ||
++ starts_with(url, "https://") ||
++ starts_with(url, "ftp://") ||
++ starts_with(url, "ftps://")) {
++ *out = url;
++ return 1;
++ }
++ return 0;
++}
++
+ static int check_submodule_url(const char *url)
+ {
+- struct credential c = CREDENTIAL_INIT;
+- int ret;
++ const char *curl_url;
+
+ if (looks_like_command_line_option(url))
+ return -1;
+
+- ret = credential_from_url_gently(&c, url, 1);
+- credential_clear(&c);
+- return ret;
++ if (submodule_url_is_relative(url)) {
++ /*
++ * This could be appended to an http URL and url-decoded;
++ * check for malicious characters.
++ */
++ char *decoded = url_decode(url);
++ int has_nl = !!strchr(decoded, '\n');
++ free(decoded);
++ if (has_nl)
++ return -1;
++ }
++
++ else if (url_to_curl_url(url, &curl_url)) {
++ struct credential c = CREDENTIAL_INIT;
++ int ret = credential_from_url_gently(&c, curl_url, 1);
++ credential_clear(&c);
++ return ret;
++ }
++
++ return 0;
+ }
+
+ struct fsck_gitmodules_data {
+diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
+index 41431b1..afdd255 100755
+--- a/t/t7416-submodule-dash-url.sh
++++ b/t/t7416-submodule-dash-url.sh
+@@ -60,6 +60,20 @@ test_expect_success 'trailing backslash is handled correctly' '
+ test_i18ngrep ! "unknown option" err
+ '
+
++test_expect_success 'fsck permits embedded newline with unrecognized scheme' '
++ git checkout --orphan newscheme &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = "data://acjbkd%0akajfdickajkd"
++ EOF
++ git add .gitmodules &&
++ git commit -m "gitmodules with unrecognized scheme" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ git push dst HEAD
++'
++
+ test_expect_success 'fsck rejects embedded newline in url' '
+ # create an orphan branch to avoid existing .gitmodules objects
+ git checkout --orphan newline &&
+@@ -76,4 +90,19 @@ test_expect_success 'fsck rejects embedded newline in url' '
+ grep gitmodulesUrl err
+ '
+
++test_expect_success 'fsck rejects embedded newline in relative url' '
++ git checkout --orphan relative-newline &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = "./%0ahost=two.example.com/foo.git"
++ EOF
++ git add .gitmodules &&
++ git commit -m "relative url with newline" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
+ test_done
+--
+1.9.1
+
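Editor's note: a self-contained C sketch of the url_to_curl_url() dispatch added above, using plain strncmp in place of git's skip_prefix/starts_with helpers; the example URLs mirror the ones in the commit message.

    /* Strip an optional "http::"/"https::"/"ftp::"/"ftps::" remote-helper
     * prefix, or accept plain curl-style URLs; anything else is not
     * handled by git-remote-curl. */
    #include <stdio.h>
    #include <string.h>

    static int to_curl_url(const char *url, const char **out)
    {
        static const char *helper_prefixes[] =
            { "http::", "https::", "ftp::", "ftps::" };
        static const char *direct_prefixes[] =
            { "http://", "https://", "ftp://", "ftps://" };
        for (size_t i = 0; i < 4; i++) {
            size_t len = strlen(helper_prefixes[i]);
            if (!strncmp(url, helper_prefixes[i], len)) {
                *out = url + len;        /* URL handed to git-remote-curl */
                return 1;
            }
        }
        for (size_t i = 0; i < 4; i++) {
            if (!strncmp(url, direct_prefixes[i], strlen(direct_prefixes[i]))) {
                *out = url;
                return 1;
            }
        }
        return 0;                        /* e.g. git:// is not curl-handled */
    }

    int main(void)
    {
        const char *examples[] = {
            "http::https://example.com/repo.git",
            "https://example.com/repo.git",
            "git://example.com/repo.git",
        };
        for (int i = 0; i < 3; i++) {
            const char *curl_url = NULL;
            int hit = to_curl_url(examples[i], &curl_url);
            printf("%-40s -> %d %s\n", examples[i], hit, hit ? curl_url : "");
        }
        return 0;
    }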
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-6.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-6.patch
new file mode 100644
index 0000000000..6b36893030
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-6.patch
@@ -0,0 +1,84 @@
+From 883508bcebe87fbe7fb7392272e930c27c30fdc2 Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Sat, 18 Apr 2020 20:53:09 -0700
+Subject: [PATCH 09/12] credential: die() when parsing invalid urls
+
+When we try to initialize credential loading by URL and find that the
+URL is invalid, we set all fields to NULL in order to avoid acting on
+malicious input. Later when we request credentials, we diagnose the
+erroneous input:
+
+ fatal: refusing to work with credential missing host field
+
+This is problematic in two ways:
+
+- The message doesn't tell the user *why* we are missing the host
+ field, so they can't tell from this message alone how to recover.
+ There can be intervening messages after the original warning of
+ bad input, so the user may not have the context to put two and two
+ together.
+
+- The error only occurs when we actually need to get a credential. If
+ the URL permits anonymous access, the only encouragement the user gets
+ to correct their bogus URL is a quiet warning.
+
+ This is inconsistent with the check we perform in fsck, where any use
+ of such a URL as a submodule is an error.
+
+When we see such a bogus URL, let's not try to be nice and continue
+without helpers. Instead, die() immediately. This is simpler and
+obviously safe. And there's very little chance of disrupting a normal
+workflow.
+
+It's _possible_ that somebody has a legitimate URL with a raw newline in
+it. It already wouldn't work with credential helpers, so this patch
+steps that up from an inconvenience to "we will refuse to work with it
+at all". If such a case does exist, we should figure out a way to work
+with it (especially if the newline is only in the path component, which
+we normally don't even pass to helpers). But until we see a real report,
+we're better off being defensive.
+
+Reported-by: Carlo Arenas <carenas@gmail.com>
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (6)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 6 ++----
+ t/t0300-credentials.sh | 3 +--
+ 2 files changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index e08ed84..22649d5 100644
+--- a/credential.c
++++ b/credential.c
+@@ -408,8 +408,6 @@ int credential_from_url_gently(struct credential *c, const char *url,
+
+ void credential_from_url(struct credential *c, const char *url)
+ {
+- if (credential_from_url_gently(c, url, 0) < 0) {
+- warning(_("skipping credential lookup for url: %s"), url);
+- credential_clear(c);
+- }
++ if (credential_from_url_gently(c, url, 0) < 0)
++ die(_("credential url cannot be parsed: %s"), url);
+ }
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index 646f845..efed3ea 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -406,8 +406,7 @@ test_expect_success 'url parser rejects embedded newlines' '
+ EOF
+ cat >expect <<-\EOF &&
+ warning: url contains a newline in its host component: https://one.example.com?%0ahost=two.example.com/
+- warning: skipping credential lookup for url: https://one.example.com?%0ahost=two.example.com/
+- fatal: refusing to work with credential missing host field
++ fatal: credential url cannot be parsed: https://one.example.com?%0ahost=two.example.com/
+ EOF
+ test_i18ncmp expect stderr
+ '
+--
+1.9.1
+
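Editor's note: a small standalone C sketch of the "gentle parser plus strict wrapper" split this patch relies on; parse_url_gently() below is a toy stand-in for git's credential_from_url_gently(), with a deliberately simplistic validity rule.

    /* The _gently variant reports failure to its caller; the plain
     * variant treats any parse failure as fatal instead of degrading to
     * a warning and continuing without helpers. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_url_gently(const char *url)
    {
        /* toy validity rule: must contain "://" and carry no newline */
        if (!strstr(url, "://") || strchr(url, '\n'))
            return -1;
        return 0;
    }

    static void parse_url(const char *url)
    {
        if (parse_url_gently(url) < 0) {
            fprintf(stderr, "fatal: credential url cannot be parsed: %s\n", url);
            exit(128);
        }
    }

    int main(void)
    {
        parse_url("https://example.com/repo.git");      /* accepted */
        parse_url("https://one.example.com\nhost=x");   /* dies here */
        return 0;
    }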
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-7.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-7.patch
new file mode 100644
index 0000000000..5e3b6f1454
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-7.patch
@@ -0,0 +1,206 @@
+From 68acf8724e9cb2f67664dd980581c0022401daf0 Mon Sep 17 00:00:00 2001
+From: Jonathan Nieder <jrnieder@gmail.com>
+Date: Sat, 18 Apr 2020 20:54:13 -0700
+Subject: [PATCH 10/12] credential: treat URL without scheme as invalid
+
+libcurl permits making requests without a URL scheme specified. In
+this case, it guesses the URL from the hostname, so I can run
+
+ git ls-remote http::ftp.example.com/path/to/repo
+
+and it would make an FTP request.
+
+Any user intentionally using such a URL is likely to have made a typo.
+Unfortunately, credential_from_url is not able to determine the host and
+protocol in order to determine appropriate credentials to send, and
+until "credential: refuse to operate when missing host or protocol",
+this resulted in another host's credentials being leaked to the named
+host.
+
+Teach credential_from_url_gently to consider such a URL to be invalid
+so that fsck can detect and block gitmodules files with such URLs,
+allowing server operators to avoid serving them to downstream users
+running older versions of Git.
+
+This also means that when such URLs are passed on the command line, Git
+will print a clearer error so affected users can switch to the simpler
+URL that explicitly specifies the host and protocol they intend.
+
+One subtlety: .gitmodules files can contain relative URLs, representing
+a URL relative to the URL they were cloned from. The relative URL
+resolver used for .gitmodules can follow ".." components out of the path
+part and past the host part of a URL, meaning that such a relative URL
+can be used to traverse from a https://foo.example.com/innocent
+superproject to a https::attacker.example.com/exploit submodule.
+Fortunately a leading ':' in the first path component after a series of
+leading './' and '../' components is unlikely to show up in other
+contexts, so we can catch this by detecting that pattern.
+
+Reported-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+Reviewed-by: Jeff King <peff@peff.net>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (7)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 7 +++++--
+ fsck.c | 47 +++++++++++++++++++++++++++++++++++++++++--
+ t/t5550-http-fetch-dumb.sh | 7 ++-----
+ t/t7416-submodule-dash-url.sh | 32 +++++++++++++++++++++++++++++
+ 4 files changed, 84 insertions(+), 9 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index 22649d5..1e1aed5 100644
+--- a/credential.c
++++ b/credential.c
+@@ -360,8 +360,11 @@ int credential_from_url_gently(struct credential *c, const char *url,
+ * (3) proto://<user>:<pass>@<host>/...
+ */
+ proto_end = strstr(url, "://");
+- if (!proto_end)
+- return 0;
++ if (!proto_end) {
++ if (!quiet)
++ warning(_("url has no scheme: %s"), url);
++ return -1;
++ }
+ cp = proto_end + 3;
+ at = strchr(cp, '@');
+ colon = strchr(cp, ':');
+diff --git a/fsck.c b/fsck.c
+index 0f21eb1..30eac29 100644
+--- a/fsck.c
++++ b/fsck.c
+@@ -978,6 +978,34 @@ static int submodule_url_is_relative(const char *url)
+ }
+
+ /*
++ * Count directory components that a relative submodule URL should chop
++ * from the remote_url it is to be resolved against.
++ *
++ * In other words, this counts "../" components at the start of a
++ * submodule URL.
++ *
++ * Returns the number of directory components to chop and writes a
++ * pointer to the next character of url after all leading "./" and
++ * "../" components to out.
++ */
++static int count_leading_dotdots(const char *url, const char **out)
++{
++ int result = 0;
++ while (1) {
++ if (starts_with_dot_dot_slash(url)) {
++ result++;
++ url += strlen("../");
++ continue;
++ }
++ if (starts_with_dot_slash(url)) {
++ url += strlen("./");
++ continue;
++ }
++ *out = url;
++ return result;
++ }
++}
++/*
+ * Check whether a transport is implemented by git-remote-curl.
+ *
+ * If it is, returns 1 and writes the URL that would be passed to
+@@ -1024,15 +1052,30 @@ static int check_submodule_url(const char *url)
+ return -1;
+
+ if (submodule_url_is_relative(url)) {
++ char *decoded;
++ const char *next;
++ int has_nl;
++
+ /*
+ * This could be appended to an http URL and url-decoded;
+ * check for malicious characters.
+ */
+- char *decoded = url_decode(url);
+- int has_nl = !!strchr(decoded, '\n');
++ decoded = url_decode(url);
++ has_nl = !!strchr(decoded, '\n');
++
+ free(decoded);
+ if (has_nl)
+ return -1;
++
++ /*
++ * URLs which escape their root via "../" can overwrite
++ * the host field and previous components, resolving to
++ * URLs like https::example.com/submodule.git that were
++ * susceptible to CVE-2020-11008.
++ */
++ if (count_leading_dotdots(url, &next) > 0 &&
++ *next == ':')
++ return -1;
+ }
+
+ else if (url_to_curl_url(url, &curl_url)) {
+diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh
+index b811d89..1c9e5d3 100755
+--- a/t/t5550-http-fetch-dumb.sh
++++ b/t/t5550-http-fetch-dumb.sh
+@@ -321,11 +321,8 @@ test_expect_success 'git client does not send an empty Accept-Language' '
+ '
+
+ test_expect_success 'remote-http complains cleanly about malformed urls' '
+- # do not actually issue "list" or other commands, as we do not
+- # want to rely on what curl would actually do with such a broken
+- # URL. This is just about making sure we do not segfault during
+- # initialization.
+- test_must_fail git remote-http http::/example.com/repo.git
++ test_must_fail git remote-http http::/example.com/repo.git 2>stderr &&
++ test_i18ngrep "url has no scheme" stderr
+ '
+
+ test_expect_success 'redirects can be forbidden/allowed' '
+diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
+index afdd255..249dc3d 100755
+--- a/t/t7416-submodule-dash-url.sh
++++ b/t/t7416-submodule-dash-url.sh
+@@ -60,6 +60,38 @@ test_expect_success 'trailing backslash is handled correctly' '
+ test_i18ngrep ! "unknown option" err
+ '
+
++test_expect_success 'fsck rejects missing URL scheme' '
++ git checkout --orphan missing-scheme &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = http::one.example.com/foo.git
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "gitmodules with missing URL scheme" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
++test_expect_success 'fsck rejects relative URL resolving to missing scheme' '
++ git checkout --orphan relative-missing-scheme &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = "..\\../.\\../:one.example.com/foo.git"
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "gitmodules with relative URL that strips off scheme" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
+ test_expect_success 'fsck permits embedded newline with unrecognized scheme' '
+ git checkout --orphan newscheme &&
+ cat >.gitmodules <<-\EOF &&
+--
+1.9.1
+
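Editor's note: an illustrative standalone C version of the count_leading_dotdots() idea from the fsck hunk above, assuming the same treatment of '\' as a directory separator; it is a sketch of the check, not the patched code itself.

    /* Strip leading "./" and "../" components and inspect the first
     * character that follows; a ':' there means the relative URL could
     * resolve to a remote-helper URL such as
     * https::attacker.example.com/exploit. */
    #include <stdio.h>
    #include <string.h>

    static int is_sep(char c) { return c == '/' || c == '\\'; }

    static int leading_dotdots(const char *url, const char **rest)
    {
        int count = 0;
        for (;;) {
            if (url[0] == '.' && url[1] == '.' && is_sep(url[2])) {
                count++;                 /* "../" escapes one component */
                url += 3;
            } else if (url[0] == '.' && is_sep(url[1])) {
                url += 2;                /* "./" is skipped, not counted */
            } else {
                *rest = url;
                return count;
            }
        }
    }

    int main(void)
    {
        const char *rest;
        int n = leading_dotdots("..\\../.\\../:one.example.com/foo.git", &rest);
        printf("chopped %d component(s); rest starts with '%c' -> %s\n",
               n, *rest, (n > 0 && *rest == ':') ? "reject" : "accept");
        return 0;
    }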
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-8.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-8.patch
new file mode 100644
index 0000000000..935d47795f
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-8.patch
@@ -0,0 +1,114 @@
+From 5e06d0781a963d62413ae7eab4eb78cc7195af8b Mon Sep 17 00:00:00 2001
+From: Jonathan Nieder <jrnieder@gmail.com>
+Date: Sat, 18 Apr 2020 20:54:57 -0700
+Subject: [PATCH 11/12] credential: treat URL with empty scheme as invalid
+
+Until "credential: refuse to operate when missing host or protocol",
+Git's credential handling code interpreted URLs with empty scheme to
+mean "give me credentials matching this host for any protocol".
+
+Luckily libcurl does not recognize such URLs (it tries to look for a
+protocol named "" and fails). Just in case that changes, let's reject
+them within Git as well. This way, credential_from_url is guaranteed to
+always produce a "struct credential" with protocol and host set.
+
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (8)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 5 ++---
+ t/t5550-http-fetch-dumb.sh | 9 +++++++++
+ t/t7416-submodule-dash-url.sh | 32 ++++++++++++++++++++++++++++++++
+ 3 files changed, 43 insertions(+), 3 deletions(-)
+
+diff --git a/credential.c b/credential.c
+index 1e1aed5..cf11cc9 100644
+--- a/credential.c
++++ b/credential.c
+@@ -360,7 +360,7 @@ int credential_from_url_gently(struct credential *c, const char *url,
+ * (3) proto://<user>:<pass>@<host>/...
+ */
+ proto_end = strstr(url, "://");
+- if (!proto_end) {
++ if (!proto_end || proto_end == url) {
+ if (!quiet)
+ warning(_("url has no scheme: %s"), url);
+ return -1;
+@@ -385,8 +385,7 @@ int credential_from_url_gently(struct credential *c, const char *url,
+ host = at + 1;
+ }
+
+- if (proto_end - url > 0)
+- c->protocol = xmemdupz(url, proto_end - url);
++ c->protocol = xmemdupz(url, proto_end - url);
+ c->host = url_decode_mem(host, slash - host);
+ /* Trim leading and trailing slashes from path */
+ while (*slash == '/')
+diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh
+index 1c9e5d3..ea2688b 100755
+--- a/t/t5550-http-fetch-dumb.sh
++++ b/t/t5550-http-fetch-dumb.sh
+@@ -325,6 +325,15 @@ test_expect_success 'remote-http complains cleanly about malformed urls' '
+ test_i18ngrep "url has no scheme" stderr
+ '
+
++# NEEDSWORK: Writing commands to git-remote-curl can race against the latter
++# erroring out, producing SIGPIPE. Remove "ok=sigpipe" once transport-helper has
++# learned to handle early remote helper failures more cleanly.
++test_expect_success 'remote-http complains cleanly about empty scheme' '
++ test_must_fail ok=sigpipe git ls-remote \
++ http::${HTTPD_URL#http}/dumb/repo.git 2>stderr &&
++ test_i18ngrep "url has no scheme" stderr
++'
++
+ test_expect_success 'redirects can be forbidden/allowed' '
+ test_must_fail git -c http.followRedirects=false \
+ clone $HTTPD_URL/dumb-redir/repo.git dumb-redir &&
+diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
+index 249dc3d..9309040 100755
+--- a/t/t7416-submodule-dash-url.sh
++++ b/t/t7416-submodule-dash-url.sh
+@@ -92,6 +92,38 @@ test_expect_success 'fsck rejects relative URL resolving to missing scheme' '
+ grep gitmodulesUrl err
+ '
+
++test_expect_success 'fsck rejects empty URL scheme' '
++ git checkout --orphan empty-scheme &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = http::://one.example.com/foo.git
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "gitmodules with empty URL scheme" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
++test_expect_success 'fsck rejects relative URL resolving to empty scheme' '
++ git checkout --orphan relative-empty-scheme &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = ../../../:://one.example.com/foo.git
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "relative gitmodules URL resolving to empty scheme" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
+ test_expect_success 'fsck permits embedded newline with unrecognized scheme' '
+ git checkout --orphan newscheme &&
+ cat >.gitmodules <<-\EOF &&
+--
+1.9.1
+
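Editor's note: a toy C version of the tightened scheme check above (!proto_end || proto_end == url); the helper name is invented for the sketch and only the scheme test is modelled.

    /* A URL is acceptable only if "://" exists and at least one
     * character precedes it, so both a missing scheme and an empty
     * scheme are rejected. */
    #include <stdio.h>
    #include <string.h>

    static int has_nonempty_scheme(const char *url)
    {
        const char *proto_end = strstr(url, "://");
        return proto_end != NULL && proto_end != url;
    }

    int main(void)
    {
        const char *urls[] = {
            "https://example.com/repo.git",   /* ok */
            "example.com/repo.git",           /* no scheme    -> reject */
            "://example.com/repo.git",        /* empty scheme -> reject */
        };
        for (int i = 0; i < 3; i++)
            printf("%-32s %s\n", urls[i],
                   has_nonempty_scheme(urls[i]) ? "ok" : "url has no scheme");
        return 0;
    }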
diff --git a/meta/recipes-devtools/git/git/CVE-2020-11008-9.patch b/meta/recipes-devtools/git/git/CVE-2020-11008-9.patch
new file mode 100644
index 0000000000..22292dbbbf
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-11008-9.patch
@@ -0,0 +1,114 @@
+From 2e084e25fa454c58a600c9434f776f2150037a76 Mon Sep 17 00:00:00 2001
+From: Jonathan Nieder <jrnieder@gmail.com>
+Date: Sat, 18 Apr 2020 20:57:22 -0700
+Subject: [PATCH 12/12] fsck: reject URL with empty host in .gitmodules
+
+Git's URL parser interprets
+
+ https:///example.com/repo.git
+
+to have no host and a path of "example.com/repo.git". Curl, on the
+other hand, internally redirects it to https://example.com/repo.git. As
+a result, until "credential: parse URL without host as empty host, not
+unset", tricking a user into fetching from such a URL would cause Git to
+send credentials for another host to example.com.
+
+Teach fsck to block and detect .gitmodules files using such a URL to
+prevent sharing them with Git versions that are not yet protected.
+
+A relative URL in a .gitmodules file could also be used to trigger this.
+The relative URL resolver used for .gitmodules does not normalize
+sequences of slashes and can follow ".." components out of the path part
+and to the host part of a URL, meaning that such a relative URL can be
+used to traverse from a https://foo.example.com/innocent superproject to
+a https:///attacker.example.com/exploit submodule. Fortunately,
+redundant extra slashes in .gitmodules are rare, so we can catch this by
+detecting one after a leading sequence of "./" and "../" components.
+
+Helped-by: Jeff King <peff@peff.net>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+Reviewed-by: Jeff King <peff@peff.net>
+
+Upstream-Status: Backport
+CVE: CVE-2020-11008 (9)
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ fsck.c | 10 +++++++---
+ t/t7416-submodule-dash-url.sh | 32 ++++++++++++++++++++++++++++++++
+ 2 files changed, 39 insertions(+), 3 deletions(-)
+
+diff --git a/fsck.c b/fsck.c
+index 30eac29..00077b1 100644
+--- a/fsck.c
++++ b/fsck.c
+@@ -1070,17 +1070,21 @@ static int check_submodule_url(const char *url)
+ /*
+ * URLs which escape their root via "../" can overwrite
+ * the host field and previous components, resolving to
+- * URLs like https::example.com/submodule.git that were
++ * URLs like https::example.com/submodule.git and
++ * https:///example.com/submodule.git that were
+ * susceptible to CVE-2020-11008.
+ */
+ if (count_leading_dotdots(url, &next) > 0 &&
+- *next == ':')
++ (*next == ':' || *next == '/'))
+ return -1;
+ }
+
+ else if (url_to_curl_url(url, &curl_url)) {
+ struct credential c = CREDENTIAL_INIT;
+- int ret = credential_from_url_gently(&c, curl_url, 1);
++ int ret = 0;
++ if (credential_from_url_gently(&c, curl_url, 1) ||
++ !*c.host)
++ ret = -1;
+ credential_clear(&c);
+ return ret;
+ }
+diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
+index 9309040..eec96e0 100755
+--- a/t/t7416-submodule-dash-url.sh
++++ b/t/t7416-submodule-dash-url.sh
+@@ -124,6 +124,38 @@ test_expect_success 'fsck rejects relative URL resolving to empty scheme' '
+ grep gitmodulesUrl err
+ '
+
++test_expect_success 'fsck rejects empty hostname' '
++ git checkout --orphan empty-host &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = http:///one.example.com/foo.git
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "gitmodules with extra slashes" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
++test_expect_success 'fsck rejects relative url that produced empty hostname' '
++ git checkout --orphan messy-relative &&
++ cat >.gitmodules <<-\EOF &&
++ [submodule "foo"]
++ url = ../../..//one.example.com/foo.git
++ EOF
++ git add .gitmodules &&
++ test_tick &&
++ git commit -m "gitmodules abusing relative_path" &&
++ test_when_finished "rm -rf dst" &&
++ git init --bare dst &&
++ git -C dst config transfer.fsckObjects true &&
++ test_must_fail git push dst HEAD 2>err &&
++ grep gitmodulesUrl err
++'
++
+ test_expect_success 'fsck permits embedded newline with unrecognized scheme' '
+ git checkout --orphan newscheme &&
+ cat >.gitmodules <<-\EOF &&
+--
+1.9.1
+
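Editor's note: a small C sketch of the empty-host rejection added above, under the simplifying assumption that the host is whatever sits between "://" and the next '/'; git's real parser also handles userinfo, ports and percent-decoding.

    /* https:///example.com/repo.git parses to a zero-length host and a
     * path of "example.com/repo.git", so the host length must be
     * checked explicitly. */
    #include <stdio.h>
    #include <string.h>

    static int host_is_empty(const char *url)
    {
        const char *p = strstr(url, "://");
        if (!p)
            return 1;                    /* no scheme at all */
        p += 3;
        return strcspn(p, "/") == 0;     /* nothing before the next '/' */
    }

    int main(void)
    {
        printf("%d\n", host_is_empty("https://example.com/repo.git"));  /* 0 */
        printf("%d\n", host_is_empty("https:///example.com/repo.git")); /* 1 */
        return 0;
    }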
diff --git a/meta/recipes-devtools/git/git/CVE-2020-5260.patch b/meta/recipes-devtools/git/git/CVE-2020-5260.patch
new file mode 100644
index 0000000000..d03e701a8f
--- /dev/null
+++ b/meta/recipes-devtools/git/git/CVE-2020-5260.patch
@@ -0,0 +1,65 @@
+From 9a6bbee8006c24b46a85d29e7b38cfa79e9ab21b Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Wed, 11 Mar 2020 17:53:41 -0400
+Subject: [PATCH] credential: avoid writing values with newlines
+
+The credential protocol that we use to speak to helpers can't represent
+values with newlines in them. This was an intentional design choice to
+keep the protocol simple, since none of the values we pass should
+generally have newlines.
+
+However, if we _do_ encounter a newline in a value, we blindly transmit
+it in credential_write(). Such values may break the protocol syntax, or
+worse, inject new valid lines into the protocol stream.
+
+The most likely way for a newline to end up in a credential struct is by
+decoding a URL with a percent-encoded newline. However, since the bug
+occurs at the moment we write the value to the protocol, we'll catch it
+there. That should leave no possibility of accidentally missing a code
+path that can trigger the problem.
+
+At this level of the code we have little choice but to die(). However,
+since we'd not ever expect to see this case outside of a malicious URL,
+that's an acceptable outcome.
+
+Reported-by: Felix Wilhelm <fwilhelm@google.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-5260
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ credential.c | 2 ++
+ t/t0300-credentials.sh | 6 ++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/credential.c b/credential.c
+index 9747f47..00ee4d6 100644
+--- a/credential.c
++++ b/credential.c
+@@ -194,6 +194,8 @@ static void credential_write_item(FILE *fp, const char *key, const char *value)
+ {
+ if (!value)
+ return;
++ if (strchr(value, '\n'))
++ die("credential value for %s contains newline", key);
+ fprintf(fp, "%s=%s\n", key, value);
+ }
+
+diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
+index 03bd31e..15cc3c5 100755
+--- a/t/t0300-credentials.sh
++++ b/t/t0300-credentials.sh
+@@ -309,4 +309,10 @@ test_expect_success 'empty helper spec resets helper list' '
+ EOF
+ '
+
++test_expect_success 'url parser rejects embedded newlines' '
++ test_must_fail git credential fill <<-\EOF
++ url=https://one.example.com?%0ahost=two.example.com/
++ EOF
++'
++
+ test_done
+--
+1.9.1
+
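Editor's note: a tiny standalone C demonstration of why the newline check above matters: the helper protocol is newline-delimited key=value pairs, so a value carrying an embedded newline injects an extra, attacker-chosen line. The decoded host below is a hypothetical example.

    #include <stdio.h>

    int main(void)
    {
        /* "%0a" in a URL percent-decodes to '\n'; writing the decoded
         * value blindly turns one host= line into two protocol lines,
         * the second chosen by the attacker. */
        const char *decoded_host = "one.example.com\nhost=two.example.com";
        printf("host=%s\n", decoded_host);   /* emits a forged host= line */
        return 0;
    }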
diff --git a/meta/recipes-devtools/go/go-1.12.inc b/meta/recipes-devtools/go/go-1.12.inc
index 6aecaad75d..2a0680aeaa 100644
--- a/meta/recipes-devtools/go/go-1.12.inc
+++ b/meta/recipes-devtools/go/go-1.12.inc
@@ -18,6 +18,10 @@ SRC_URI += "\
file://0008-use-GOBUILDMODE-to-set-buildmode.patch \
file://0001-release-branch.go1.12-security-net-textproto-don-t-n.patch \
file://0010-fix-CVE-2019-17596.patch \
+ file://CVE-2020-15586.patch \
+ file://CVE-2020-16845.patch \
+ file://0001-net-http-cgi-rename-a-test-file-to-be-less-cute.patch \
+ file://CVE-2020-24553.patch \
"
SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch"
diff --git a/meta/recipes-devtools/go/go-1.12/0001-net-http-cgi-rename-a-test-file-to-be-less-cute.patch b/meta/recipes-devtools/go/go-1.12/0001-net-http-cgi-rename-a-test-file-to-be-less-cute.patch
new file mode 100644
index 0000000000..7c07961c03
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.12/0001-net-http-cgi-rename-a-test-file-to-be-less-cute.patch
@@ -0,0 +1,28 @@
+From 8390c478600b852392cb116741b3cb239c94d123 Mon Sep 17 00:00:00 2001
+From: Brad Fitzpatrick <bradfitz@golang.org>
+Date: Wed, 15 Jan 2020 18:08:10 +0000
+Subject: [PATCH] net/http/cgi: rename a test file to be less cute
+
+My fault (from CL 4245070), sorry.
+
+Change-Id: Ib95d3170dc326e74aa74c22421c4e44a8b00f577
+Reviewed-on: https://go-review.googlesource.com/c/go/+/214920
+Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>
+
+Upstream-Status: Backport
+[lz: Add this patch so that the patch for CVE-2020-24553 applies cleanly]
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ src/net/http/cgi/{matryoshka_test.go => integration_test.go} | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+ rename src/net/http/cgi/{matryoshka_test.go => integration_test.go} (100%)
+
+diff --git a/src/net/http/cgi/matryoshka_test.go b/src/net/http/cgi/integration_test.go
+similarity index 100%
+rename from src/net/http/cgi/matryoshka_test.go
+rename to src/net/http/cgi/integration_test.go
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/go/go-1.12/CVE-2020-15586.patch b/meta/recipes-devtools/go/go-1.12/CVE-2020-15586.patch
new file mode 100644
index 0000000000..ebdc5aec6d
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.12/CVE-2020-15586.patch
@@ -0,0 +1,131 @@
+From fa98f46741f818913a8c11b877520a548715131f Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Mon, 13 Jul 2020 13:27:22 -0400
+Subject: [PATCH] net/http: synchronize "100 Continue" write and Handler writes
+
+The expectContinueReader writes to the connection on the first
+Request.Body read. Since a Handler might be doing a read in parallel or
+before a write, expectContinueReader needs to synchronize with the
+ResponseWriter, and abort if a response already went out.
+
+The tests will land in a separate CL.
+
+Fixes #34902
+Fixes CVE-2020-15586
+
+Change-Id: Icdd8dd539f45e8863762bd378194bb4741e875fc
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/793350
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/242598
+Run-TryBot: Katie Hockman <katie@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+
+Upstream-Status: Backport
+CVE: CVE-2020-15586
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ src/net/http/server.go | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/src/net/http/server.go b/src/net/http/server.go
+index a995a50658..d41b5f6f48 100644
+--- a/src/net/http/server.go
++++ b/src/net/http/server.go
+@@ -425,6 +425,16 @@ type response struct {
+ wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
+ wantsClose bool // HTTP request has Connection "close"
+
++ // canWriteContinue is a boolean value accessed as an atomic int32
++ // that says whether or not a 100 Continue header can be written
++ // to the connection.
++ // writeContinueMu must be held while writing the header.
++ // These two fields together synchronize the body reader
++ // (the expectContinueReader, which wants to write 100 Continue)
++ // against the main writer.
++ canWriteContinue atomicBool
++ writeContinueMu sync.Mutex
++
+ w *bufio.Writer // buffers output in chunks to chunkWriter
+ cw chunkWriter
+
+@@ -515,6 +525,7 @@ type atomicBool int32
+
+ func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
+ func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
++func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
+
+ // declareTrailer is called for each Trailer header when the
+ // response header is written. It notes that a header will need to be
+@@ -878,21 +889,27 @@ type expectContinueReader struct {
+ resp *response
+ readCloser io.ReadCloser
+ closed bool
+- sawEOF bool
++ sawEOF atomicBool
+ }
+
+ func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
+ if ecr.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+- if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
+- ecr.resp.wroteContinue = true
+- ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
+- ecr.resp.conn.bufw.Flush()
++ w := ecr.resp
++ if !w.wroteContinue && w.canWriteContinue.isSet() && !w.conn.hijacked() {
++ w.wroteContinue = true
++ w.writeContinueMu.Lock()
++ if w.canWriteContinue.isSet() {
++ w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
++ w.conn.bufw.Flush()
++ w.canWriteContinue.setFalse()
++ }
++ w.writeContinueMu.Unlock()
+ }
+ n, err = ecr.readCloser.Read(p)
+ if err == io.EOF {
+- ecr.sawEOF = true
++ ecr.sawEOF.setTrue()
+ }
+ return
+ }
+@@ -1311,7 +1328,7 @@ func (cw *chunkWriter) writeHeader(p []byte) {
+ // because we don't know if the next bytes on the wire will be
+ // the body-following-the-timer or the subsequent request.
+ // See Issue 11549.
+- if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
++ if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
+ w.closeAfterReply = true
+ }
+
+@@ -1561,6 +1578,17 @@ func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err er
+ }
+ return 0, ErrHijacked
+ }
++
++ if w.canWriteContinue.isSet() {
++ // Body reader wants to write 100 Continue but hasn't yet.
++ // Tell it not to. The store must be done while holding the lock
++ // because the lock makes sure that there is not an active write
++ // this very moment.
++ w.writeContinueMu.Lock()
++ w.canWriteContinue.setFalse()
++ w.writeContinueMu.Unlock()
++ }
++
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+@@ -1872,6 +1900,7 @@ func (c *conn) serve(ctx context.Context) {
+ if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
+ // Wrap the Body reader with one that replies on the connection
+ req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
++ w.canWriteContinue.setTrue()
+ }
+ } else if req.Header.get("Expect") != "" {
+ w.sendExpectationFailed()
+--
+2.17.1
+
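Editor's note: although the patch above is Go, the synchronization pattern translates directly. The following is a hedged C11/pthreads sketch of the same idea, an atomic permission flag plus a mutex held around the actual write, with invented names; it is not a port of the Go code.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int can_write_continue = 1;
    static pthread_mutex_t continue_mu = PTHREAD_MUTEX_INITIALIZER;

    static void *body_reader(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&continue_mu);
        if (atomic_load(&can_write_continue)) {
            puts("HTTP/1.1 100 Continue");   /* only if not yet revoked */
            atomic_store(&can_write_continue, 0);
        }
        pthread_mutex_unlock(&continue_mu);
        return NULL;
    }

    static void write_response_body(void)
    {
        /* Revoke the prelude before writing; holding the mutex ensures
         * no "100 Continue" is being written at this very moment. */
        pthread_mutex_lock(&continue_mu);
        atomic_store(&can_write_continue, 0);
        pthread_mutex_unlock(&continue_mu);
        puts("HTTP/1.1 200 OK");
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, body_reader, NULL);
        write_response_body();
        pthread_join(t, NULL);
        return 0;
    }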
diff --git a/meta/recipes-devtools/go/go-1.12/CVE-2020-16845.patch b/meta/recipes-devtools/go/go-1.12/CVE-2020-16845.patch
new file mode 100644
index 0000000000..80f467522f
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.12/CVE-2020-16845.patch
@@ -0,0 +1,110 @@
+From 027d7241ce050d197e7fabea3d541ffbe3487258 Mon Sep 17 00:00:00 2001
+From: Katie Hockman <katie@golang.org>
+Date: Tue, 4 Aug 2020 11:45:32 -0400
+Subject: [PATCH] encoding/binary: read at most MaxVarintLen64 bytes in
+ ReadUvarint
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This CL ensures that ReadUvarint consumes only a limited
+amount of input (instead of an unbounded amount).
+
+On some inputs, ReadUvarint could read an arbitrary number
+of bytes before deciding to return an overflow error.
+After this CL, ReadUvarint returns that same overflow
+error sooner, after reading at most MaxVarintLen64 bytes.
+
+Fix authored by Robert Griesemer and Filippo Valsorda.
+
+Thanks to Diederik Loerakker, Jonny Rhea, Raúl Kripalani,
+and Preston Van Loon for reporting this.
+
+Fixes #40618
+Fixes CVE-2020-16845
+
+Change-Id: Ie0cb15972f14c38b7cf7af84c45c4ce54909bb8f
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/812099
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/247120
+Run-TryBot: Katie Hockman <katie@golang.org>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+Reviewed-by: Alexander Rakoczy <alex@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go.git]
+CVE: CVE-2020-16845
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ src/encoding/binary/varint.go | 5 +++--
+ src/encoding/binary/varint_test.go | 18 ++++++++++++------
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/src/encoding/binary/varint.go b/src/encoding/binary/varint.go
+index bcb8ac9a45..38af61075c 100644
+--- a/src/encoding/binary/varint.go
++++ b/src/encoding/binary/varint.go
+@@ -106,13 +106,13 @@ var overflow = errors.New("binary: varint overflows a 64-bit integer")
+ func ReadUvarint(r io.ByteReader) (uint64, error) {
+ var x uint64
+ var s uint
+- for i := 0; ; i++ {
++ for i := 0; i < MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ return x, err
+ }
+ if b < 0x80 {
+- if i > 9 || i == 9 && b > 1 {
++ if i == 9 && b > 1 {
+ return x, overflow
+ }
+ return x | uint64(b)<<s, nil
+@@ -120,6 +120,7 @@ func ReadUvarint(r io.ByteReader) (uint64, error) {
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
++ return x, overflow
+ }
+
+ // ReadVarint reads an encoded signed integer from r and returns it as an int64.
+diff --git a/src/encoding/binary/varint_test.go b/src/encoding/binary/varint_test.go
+index ca411ecbd6..6ef4c99505 100644
+--- a/src/encoding/binary/varint_test.go
++++ b/src/encoding/binary/varint_test.go
+@@ -121,21 +121,27 @@ func TestBufferTooSmall(t *testing.T) {
+ }
+ }
+
+-func testOverflow(t *testing.T, buf []byte, n0 int, err0 error) {
++func testOverflow(t *testing.T, buf []byte, x0 uint64, n0 int, err0 error) {
+ x, n := Uvarint(buf)
+ if x != 0 || n != n0 {
+ t.Errorf("Uvarint(%v): got x = %d, n = %d; want 0, %d", buf, x, n, n0)
+ }
+
+- x, err := ReadUvarint(bytes.NewReader(buf))
+- if x != 0 || err != err0 {
+- t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want 0, %s", buf, x, err, err0)
++ r := bytes.NewReader(buf)
++ len := r.Len()
++ x, err := ReadUvarint(r)
++ if x != x0 || err != err0 {
++ t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want %d, %s", buf, x, err, x0, err0)
++ }
++ if read := len - r.Len(); read > MaxVarintLen64 {
++ t.Errorf("ReadUvarint(%v): read more than MaxVarintLen64 bytes, got %d", buf, read)
+ }
+ }
+
+ func TestOverflow(t *testing.T) {
+- testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, -10, overflow)
+- testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, -13, overflow)
++ testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, 0, -10, overflow)
++ testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, 0, -13, overflow)
++ testOverflow(t, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 1<<64-1, 0, overflow) // 11 bytes, should overflow
+ }
+
+ func TestNonCanonicalZero(t *testing.T) {
+--
+2.17.0
+
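Editor's note: a C sketch of the bounded decode the Go patch above enforces; read_uvarint() is an illustrative reimplementation of the LEB128-style varint format, not code from the Go tree.

    /* Read at most 10 (MaxVarintLen64) bytes of an unsigned varint and
     * report overflow instead of consuming input without limit. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_VARINT_LEN_64 10

    /* Returns bytes consumed, or -1 on overflow/truncation. */
    static int read_uvarint(const uint8_t *buf, size_t len, uint64_t *out)
    {
        uint64_t x = 0;
        unsigned s = 0;
        for (size_t i = 0; i < len && i < MAX_VARINT_LEN_64; i++) {
            uint8_t b = buf[i];
            if (b < 0x80) {
                if (i == MAX_VARINT_LEN_64 - 1 && b > 1)
                    return -1;               /* value exceeds 64 bits */
                *out = x | ((uint64_t)b << s);
                return (int)(i + 1);
            }
            x |= (uint64_t)(b & 0x7f) << s;
            s += 7;
        }
        return -1;   /* ran past the 10-byte cap (or out of input) */
    }

    int main(void)
    {
        const uint8_t ok[]  = { 0xac, 0x02 };                    /* 300 */
        const uint8_t bad[] = { 0x80, 0x80, 0x80, 0x80, 0x80,
                                0x80, 0x80, 0x80, 0x80, 0x80, 0x01 };
        uint64_t v = 0;
        printf("ok:  %d (value %llu)\n", read_uvarint(ok, sizeof ok, &v),
               (unsigned long long)v);
        printf("bad: %d\n", read_uvarint(bad, sizeof bad, &v));
        return 0;
    }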
diff --git a/meta/recipes-devtools/go/go-1.12/CVE-2020-24553.patch b/meta/recipes-devtools/go/go-1.12/CVE-2020-24553.patch
new file mode 100644
index 0000000000..18a218bc9a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.12/CVE-2020-24553.patch
@@ -0,0 +1,429 @@
+From eb07103a083237414145a45f029c873d57037e06 Mon Sep 17 00:00:00 2001
+From: Roberto Clapis <roberto@golang.org>
+Date: Wed, 26 Aug 2020 08:53:03 +0200
+Subject: [PATCH] [release-branch.go1.15-security] net/http/cgi,net/http/fcgi:
+ add Content-Type detection
+
+This CL ensures that responses served via CGI and FastCGI
+have a Content-Type header based on the content of the
+response if not explicitly set by handlers.
+
+If the implementers of the handler did not explicitly
+specify a Content-Type both CGI implementations would default
+to "text/html", potentially causing cross-site scripting.
+
+Thanks to RedTeam Pentesting GmbH for reporting this.
+
+Fixes CVE-2020-24553
+
+Change-Id: I82cfc396309b5ab2e8d6e9a87eda8ea7e3799473
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/823217
+Reviewed-by: Russ Cox <rsc@google.com>
+(cherry picked from commit 23d675d07fdc56aafd67c0a0b63d5b7e14708ff0)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/835311
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-24553
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ src/net/http/cgi/child.go | 36 ++++++++++-----
+ src/net/http/cgi/child_test.go | 69 ++++++++++++++++++++++++++++
+ src/net/http/cgi/integration_test.go | 53 ++++++++++++++++++++-
+ src/net/http/fcgi/child.go | 39 ++++++++++++----
+ src/net/http/fcgi/fcgi_test.go | 52 +++++++++++++++++++++
+ 5 files changed, 227 insertions(+), 22 deletions(-)
+
+diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go
+index 9474175f17..61de6165f6 100644
+--- a/src/net/http/cgi/child.go
++++ b/src/net/http/cgi/child.go
+@@ -163,10 +163,12 @@ func Serve(handler http.Handler) error {
+ }
+
+ type response struct {
+- req *http.Request
+- header http.Header
+- bufw *bufio.Writer
+- headerSent bool
++ req *http.Request
++ header http.Header
++ code int
++ wroteHeader bool
++ wroteCGIHeader bool
++ bufw *bufio.Writer
+ }
+
+ func (r *response) Flush() {
+@@ -178,26 +180,38 @@ func (r *response) Header() http.Header {
+ }
+
+ func (r *response) Write(p []byte) (n int, err error) {
+- if !r.headerSent {
++ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
++ if !r.wroteCGIHeader {
++ r.writeCGIHeader(p)
++ }
+ return r.bufw.Write(p)
+ }
+
+ func (r *response) WriteHeader(code int) {
+- if r.headerSent {
++ if r.wroteHeader {
+ // Note: explicitly using Stderr, as Stdout is our HTTP output.
+ fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
+ return
+ }
+- r.headerSent = true
+- fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
++ r.wroteHeader = true
++ r.code = code
++}
+
+- // Set a default Content-Type
++// writeCGIHeader finalizes the header sent to the client and writes it to the output.
++// p is not written by writeHeader, but is the first chunk of the body
++// that will be written. It is sniffed for a Content-Type if none is
++// set explicitly.
++func (r *response) writeCGIHeader(p []byte) {
++ if r.wroteCGIHeader {
++ return
++ }
++ r.wroteCGIHeader = true
++ fmt.Fprintf(r.bufw, "Status: %d %s\r\n", r.code, http.StatusText(r.code))
+ if _, hasType := r.header["Content-Type"]; !hasType {
+- r.header.Add("Content-Type", "text/html; charset=utf-8")
++ r.header.Set("Content-Type", http.DetectContentType(p))
+ }
+-
+ r.header.Write(r.bufw)
+ r.bufw.WriteString("\r\n")
+ r.bufw.Flush()
+diff --git a/src/net/http/cgi/child_test.go b/src/net/http/cgi/child_test.go
+index 14e0af475f..f6ecb6eb80 100644
+--- a/src/net/http/cgi/child_test.go
++++ b/src/net/http/cgi/child_test.go
+@@ -7,6 +7,11 @@
+ package cgi
+
+ import (
++ "bufio"
++ "bytes"
++ "net/http"
++ "net/http/httptest"
++ "strings"
+ "testing"
+ )
+
+@@ -148,3 +153,67 @@ func TestRequestWithoutRemotePort(t *testing.T) {
+ t.Errorf("RemoteAddr: got %q; want %q", g, e)
+ }
+ }
++
++type countingWriter int
++
++func (c *countingWriter) Write(p []byte) (int, error) {
++ *c += countingWriter(len(p))
++ return len(p), nil
++}
++func (c *countingWriter) WriteString(p string) (int, error) {
++ *c += countingWriter(len(p))
++ return len(p), nil
++}
++
++func TestResponse(t *testing.T) {
++ var tests = []struct {
++ name string
++ body string
++ wantCT string
++ }{
++ {
++ name: "no body",
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "html",
++ body: "<html><head><title>test page</title></head><body>This is a body</body></html>",
++ wantCT: "text/html; charset=utf-8",
++ },
++ {
++ name: "text",
++ body: strings.Repeat("gopher", 86),
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "jpg",
++ body: "\xFF\xD8\xFF" + strings.Repeat("B", 1024),
++ wantCT: "image/jpeg",
++ },
++ }
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ resp := response{
++ req: httptest.NewRequest("GET", "/", nil),
++ header: http.Header{},
++ bufw: bufio.NewWriter(&buf),
++ }
++ n, err := resp.Write([]byte(tt.body))
++ if err != nil {
++ t.Errorf("Write: unexpected %v", err)
++ }
++ if want := len(tt.body); n != want {
++ t.Errorf("reported short Write: got %v want %v", n, want)
++ }
++ resp.writeCGIHeader(nil)
++ resp.Flush()
++ if got := resp.Header().Get("Content-Type"); got != tt.wantCT {
++ t.Errorf("wrong content-type: got %q, want %q", got, tt.wantCT)
++ }
++ if !bytes.HasSuffix(buf.Bytes(), []byte(tt.body)) {
++ t.Errorf("body was not correctly written")
++ }
++ })
++ }
++}
+diff --git a/src/net/http/cgi/integration_test.go b/src/net/http/cgi/integration_test.go
+index 32d59c09a3..295c3b82d4 100644
+--- a/src/net/http/cgi/integration_test.go
++++ b/src/net/http/cgi/integration_test.go
+@@ -16,7 +16,9 @@ import (
+ "io"
+ "net/http"
+ "net/http/httptest"
++ "net/url"
+ "os"
++ "strings"
+ "testing"
+ "time"
+ )
+@@ -52,7 +54,7 @@ func TestHostingOurselves(t *testing.T) {
+ }
+ replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+- if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
++ if expected, got := "text/plain; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
+ t.Errorf("got a Content-Type of %q; expected %q", got, expected)
+ }
+ if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
+@@ -152,6 +154,51 @@ func TestChildOnlyHeaders(t *testing.T) {
+ }
+ }
+
++func TestChildContentType(t *testing.T) {
++ testenv.MustHaveExec(t)
++
++ h := &Handler{
++ Path: os.Args[0],
++ Root: "/test.go",
++ Args: []string{"-test.run=TestBeChildCGIProcess"},
++ }
++ var tests = []struct {
++ name string
++ body string
++ wantCT string
++ }{
++ {
++ name: "no body",
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "html",
++ body: "<html><head><title>test page</title></head><body>This is a body</body></html>",
++ wantCT: "text/html; charset=utf-8",
++ },
++ {
++ name: "text",
++ body: strings.Repeat("gopher", 86),
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "jpg",
++ body: "\xFF\xD8\xFF" + strings.Repeat("B", 1024),
++ wantCT: "image/jpeg",
++ },
++ }
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ expectedMap := map[string]string{"_body": tt.body}
++ req := fmt.Sprintf("GET /test.go?exact-body=%s HTTP/1.0\nHost: example.com\n\n", url.QueryEscape(tt.body))
++ replay := runCgiTest(t, h, req, expectedMap)
++ if got := replay.Header().Get("Content-Type"); got != tt.wantCT {
++ t.Errorf("got a Content-Type of %q; expected it to start with %q", got, tt.wantCT)
++ }
++ })
++ }
++}
++
+ // golang.org/issue/7198
+ func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") }
+ func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") }
+@@ -203,6 +250,10 @@ func TestBeChildCGIProcess(t *testing.T) {
+ if req.FormValue("no-body") == "1" {
+ return
+ }
++ if eb, ok := req.Form["exact-body"]; ok {
++ io.WriteString(rw, eb[0])
++ return
++ }
+ if req.FormValue("write-forever") == "1" {
+ io.Copy(rw, neverEnding('a'))
+ for {
+diff --git a/src/net/http/fcgi/child.go b/src/net/http/fcgi/child.go
+index 30a6b2ce2d..a31273b3ec 100644
+--- a/src/net/http/fcgi/child.go
++++ b/src/net/http/fcgi/child.go
+@@ -74,10 +74,12 @@ func (r *request) parseParams() {
+
+ // response implements http.ResponseWriter.
+ type response struct {
+- req *request
+- header http.Header
+- w *bufWriter
+- wroteHeader bool
++ req *request
++ header http.Header
++ code int
++ wroteHeader bool
++ wroteCGIHeader bool
++ w *bufWriter
+ }
+
+ func newResponse(c *child, req *request) *response {
+@@ -92,11 +94,14 @@ func (r *response) Header() http.Header {
+ return r.header
+ }
+
+-func (r *response) Write(data []byte) (int, error) {
++func (r *response) Write(p []byte) (n int, err error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+- return r.w.Write(data)
++ if !r.wroteCGIHeader {
++ r.writeCGIHeader(p)
++ }
++ return r.w.Write(p)
+ }
+
+ func (r *response) WriteHeader(code int) {
+@@ -104,22 +109,34 @@ func (r *response) WriteHeader(code int) {
+ return
+ }
+ r.wroteHeader = true
++ r.code = code
+ if code == http.StatusNotModified {
+ // Must not have body.
+ r.header.Del("Content-Type")
+ r.header.Del("Content-Length")
+ r.header.Del("Transfer-Encoding")
+- } else if r.header.Get("Content-Type") == "" {
+- r.header.Set("Content-Type", "text/html; charset=utf-8")
+ }
+-
+ if r.header.Get("Date") == "" {
+ r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
+ }
++}
+
+- fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
++// writeCGIHeader finalizes the header sent to the client and writes it to the output.
++// p is not written by writeHeader, but is the first chunk of the body
++// that will be written. It is sniffed for a Content-Type if none is
++// set explicitly.
++func (r *response) writeCGIHeader(p []byte) {
++ if r.wroteCGIHeader {
++ return
++ }
++ r.wroteCGIHeader = true
++ fmt.Fprintf(r.w, "Status: %d %s\r\n", r.code, http.StatusText(r.code))
++ if _, hasType := r.header["Content-Type"]; r.code != http.StatusNotModified && !hasType {
++ r.header.Set("Content-Type", http.DetectContentType(p))
++ }
+ r.header.Write(r.w)
+ r.w.WriteString("\r\n")
++ r.w.Flush()
+ }
+
+ func (r *response) Flush() {
+@@ -290,6 +307,8 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) {
+ httpReq = httpReq.WithContext(envVarCtx)
+ c.handler.ServeHTTP(r, httpReq)
+ }
++ // Make sure we serve something even if nothing was written to r
++ r.Write(nil)
+ r.Close()
+ c.mu.Lock()
+ delete(c.requests, req.reqId)
+diff --git a/src/net/http/fcgi/fcgi_test.go b/src/net/http/fcgi/fcgi_test.go
+index e9d2b34023..4a27a12c35 100644
+--- a/src/net/http/fcgi/fcgi_test.go
++++ b/src/net/http/fcgi/fcgi_test.go
+@@ -10,6 +10,7 @@ import (
+ "io"
+ "io/ioutil"
+ "net/http"
++ "strings"
+ "testing"
+ )
+
+@@ -344,3 +345,54 @@ func TestChildServeReadsEnvVars(t *testing.T) {
+ <-done
+ }
+ }
++
++func TestResponseWriterSniffsContentType(t *testing.T) {
++ var tests = []struct {
++ name string
++ body string
++ wantCT string
++ }{
++ {
++ name: "no body",
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "html",
++ body: "<html><head><title>test page</title></head><body>This is a body</body></html>",
++ wantCT: "text/html; charset=utf-8",
++ },
++ {
++ name: "text",
++ body: strings.Repeat("gopher", 86),
++ wantCT: "text/plain; charset=utf-8",
++ },
++ {
++ name: "jpg",
++ body: "\xFF\xD8\xFF" + strings.Repeat("B", 1024),
++ wantCT: "image/jpeg",
++ },
++ }
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ input := make([]byte, len(streamFullRequestStdin))
++ copy(input, streamFullRequestStdin)
++ rc := nopWriteCloser{bytes.NewBuffer(input)}
++ done := make(chan bool)
++ var resp *response
++ c := newChild(rc, http.HandlerFunc(func(
++ w http.ResponseWriter,
++ r *http.Request,
++ ) {
++ io.WriteString(w, tt.body)
++ resp = w.(*response)
++ done <- true
++ }))
++ defer c.cleanUp()
++ go c.serve()
++ <-done
++ if got := resp.Header().Get("Content-Type"); got != tt.wantCT {
++ t.Errorf("got a Content-Type of %q; expected it to start with %q", got, tt.wantCT)
++ }
++ })
++ }
++}
+--
+2.17.1
+
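Editor's note: a rough C sketch of the deferred-header idea above: the status line and headers are only flushed together with the first body chunk, so a Content-Type can be sniffed from that chunk when the handler did not set one. sniff() is a deliberately tiny stand-in for Go's http.DetectContentType, covering only two signatures.

    #include <stdio.h>
    #include <string.h>

    static const char *sniff(const char *body, size_t len)
    {
        if (len >= 3 && !memcmp(body, "\xFF\xD8\xFF", 3))
            return "image/jpeg";
        if (len >= 5 && !strncmp(body, "<html", 5))
            return "text/html; charset=utf-8";
        return "text/plain; charset=utf-8";  /* safe fallback, never assume HTML */
    }

    static void write_cgi_header(FILE *out, int code, const char *content_type,
                                 const char *first_chunk, size_t len)
    {
        fprintf(out, "Status: %d\r\n", code);
        fprintf(out, "Content-Type: %s\r\n\r\n",
                content_type ? content_type : sniff(first_chunk, len));
    }

    int main(void)
    {
        const char body[] = "\xFF\xD8\xFF...jpeg bytes...";
        write_cgi_header(stdout, 200, NULL, body, sizeof body - 1);
        fwrite(body, 1, sizeof body - 1, stdout);
        return 0;
    }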
diff --git a/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch b/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch
new file mode 100644
index 0000000000..d43f7e1a7a
--- /dev/null
+++ b/meta/recipes-devtools/mtd/mtd-utils/0001-mtd-utils-Fix-return-value-of-ubiformat.patch
@@ -0,0 +1,62 @@
+From 4d19bffcfd66e25d3ee74536ae2d2da7ad52e8e2 Mon Sep 17 00:00:00 2001
+From: Barry Grussling <barry@grussling.com>
+Date: Sun, 12 Jan 2020 12:33:32 -0800
+Subject: [PATCH] mtd-utils: Fix return value of ubiformat
+Organization: O.S. Systems Software LTDA.
+
+This changeset fixes a feature regression in ubiformat. Older versions of
+ubiformat, when invoked with a flash-image, would return 0 when no error
+was encountered. Upon upgrading to the latest version, it was discovered that ubiformat
+returned 255 even without encountering an error condition.
+
+This changeset corrects the above issue and causes ubiformat, when given an
+image file, to return 0 when no errors are detected.
+
+Tested by running through my loading scripts and verifying ubiformat returned
+0.
+
+Upstream-Status: Backport [2.1.2]
+
+Signed-off-by: Barry Grussling <barry@grussling.com>
+Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
+Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
+---
+ ubi-utils/ubiformat.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/ubi-utils/ubiformat.c b/ubi-utils/ubiformat.c
+index a90627c..5377b12 100644
+--- a/ubi-utils/ubiformat.c
++++ b/ubi-utils/ubiformat.c
+@@ -550,6 +550,7 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
+ struct ubi_vtbl_record *vtbl;
+ int eb1 = -1, eb2 = -1;
+ long long ec1 = -1, ec2 = -1;
++ int ret = -1;
+
+ write_size = UBI_EC_HDR_SIZE + mtd->subpage_size - 1;
+ write_size /= mtd->subpage_size;
+@@ -643,8 +644,10 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
+ if (!args.quiet && !args.verbose)
+ printf("\n");
+
+- if (novtbl)
++ if (novtbl) {
++ ret = 0;
+ goto out_free;
++ }
+
+ if (eb1 == -1 || eb2 == -1) {
+ errmsg("no eraseblocks for volume table");
+@@ -669,7 +672,7 @@ static int format(libmtd_t libmtd, const struct mtd_dev_info *mtd,
+
+ out_free:
+ free(hdr);
+- return -1;
++ return ret;
+ }
+
+ int main(int argc, char * const argv[])
+--
+2.27.0
+
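Editor's note: a generic C illustration of the bug class fixed above: when every exit funnels through one cleanup label, the label must return a status variable set on each path rather than a hard-coded error code, or the success path inherits the failure return value. The function here is invented, not ubiformat's format().

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(int skip_second_stage)
    {
        int ret = -1;                      /* assume failure until proven ok */
        char *buf = malloc(64);
        if (!buf)
            goto out_free;

        /* ... first stage of work ... */

        if (skip_second_stage) {
            ret = 0;                       /* the bug was returning -1 here */
            goto out_free;
        }

        /* ... second stage of work ... */
        ret = 0;

    out_free:
        free(buf);
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", do_work(1), do_work(0));   /* expect: 0 0 */
        return 0;
    }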
diff --git a/meta/recipes-devtools/mtd/mtd-utils_git.bb b/meta/recipes-devtools/mtd/mtd-utils_git.bb
index 810fe40f4e..d1658a739b 100644
--- a/meta/recipes-devtools/mtd/mtd-utils_git.bb
+++ b/meta/recipes-devtools/mtd/mtd-utils_git.bb
@@ -15,6 +15,7 @@ PV = "2.1.1"
SRCREV = "4443221ce9b88440cd9f5bb78e6fe95621d36c8a"
SRC_URI = "git://git.infradead.org/mtd-utils.git \
file://add-exclusion-to-mkfs-jffs2-git-2.patch \
+ file://0001-mtd-utils-Fix-return-value-of-ubiformat.patch \
"
S = "${WORKDIR}/git/"
diff --git a/meta/recipes-devtools/nasm/nasm/0001-BR3392712-pp_tokline-fix-double-free.patch b/meta/recipes-devtools/nasm/nasm/0001-BR3392712-pp_tokline-fix-double-free.patch
new file mode 100644
index 0000000000..b2cd3fe24b
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/0001-BR3392712-pp_tokline-fix-double-free.patch
@@ -0,0 +1,36 @@
+From 652c58c92d9e8eaf09a0eb125c4fe2d4b6cc3397 Mon Sep 17 00:00:00 2001
+From: Cyrill Gorcunov <gorcunov@gmail.com>
+Date: Tue, 15 Sep 2020 15:50:20 +0800
+Subject: [PATCH] BR3392712: pp_tokline: fix double free
+
+Make sure the data being freed won't get double
+freed afterwards -- the pointers must be zapped
+(actually nasm_free and free_tlist support
+being called with a NULL pointer as an argument).
+
+Upstream-Status: Backport [https://github.com/netwide-assembler/nasm/commit/8806c3ca007b84accac21dd88b900fb03614ceb7]
+CVE: CVE-2020-24978
+
+Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
+Signed-off-by: Wenlin Kang <wenlin.kang@windriver.com>
+---
+ asm/preproc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/asm/preproc.c b/asm/preproc.c
+index 2737ec1..da2c9c9 100644
+--- a/asm/preproc.c
++++ b/asm/preproc.c
+@@ -5119,6 +5119,9 @@ static char *pp_getline(void)
+ free_tlist(m->iline);
+ nasm_free(m->paramlen);
+ l->finishes->in_progress = 0;
++ m->params = NULL;
++ m->iline = NULL;
++ m->paramlen = NULL;
+ }
+ }
+
+--
+2.13.3
+
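The crash fixed above is the classic double-free pattern: the same pointers get released again on a later pass. A small illustrative C program (not nasm's code; the struct and field names are made up) showing the free-and-NULL idiom the patch applies, which relies on free(NULL) being defined as a no-op:

    /* Once a pointer has been freed it is set to NULL so a later
     * cleanup pass cannot free the same allocation a second time. */
    #include <stdlib.h>

    struct ctx {
        char *params;
        char *iline;
    };

    static void release(struct ctx *c)
    {
        free(c->params);
        c->params = NULL;   /* zap after freeing */
        free(c->iline);
        c->iline = NULL;
    }

    int main(void)
    {
        struct ctx c = { malloc(16), malloc(16) };
        release(&c);
        release(&c);        /* safe: both pointers are NULL now */
        return 0;
    }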
diff --git a/meta/recipes-devtools/nasm/nasm_2.14.02.bb b/meta/recipes-devtools/nasm/nasm_2.14.02.bb
index bd4ecea8b6..bb2b58f87e 100644
--- a/meta/recipes-devtools/nasm/nasm_2.14.02.bb
+++ b/meta/recipes-devtools/nasm/nasm_2.14.02.bb
@@ -6,6 +6,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe"
SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \
file://CVE-2018-19755.patch \
file://CVE-2019-14248.patch \
+ file://0001-BR3392712-pp_tokline-fix-double-free.patch \
"
SRC_URI[md5sum] = "3f489aa48ad2aa1f967dc5e293bbd06f"
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils/0001-Switch-all-scripts-to-use-Python-3.x.patch b/meta/recipes-devtools/opkg-utils/opkg-utils/0001-Switch-all-scripts-to-use-Python-3.x.patch
deleted file mode 100644
index 691ed50c2b..0000000000
--- a/meta/recipes-devtools/opkg-utils/opkg-utils/0001-Switch-all-scripts-to-use-Python-3.x.patch
+++ /dev/null
@@ -1,113 +0,0 @@
-From d42b23f4fb5d6bd58e92e995fe5befc76efbae0c Mon Sep 17 00:00:00 2001
-From: Alexander Kanavin <alex.kanavin@gmail.com>
-Date: Thu, 27 Apr 2017 15:47:58 +0300
-Subject: [PATCH] Switch all scripts to use Python 3.x
-
-Upstream-Status: Pending
-Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
----
- makePackage | 2 +-
- opkg-compare-indexes | 2 +-
- opkg-graph-deps | 2 +-
- opkg-list-fields | 2 +-
- opkg-make-index | 2 +-
- opkg-show-deps | 2 +-
- opkg-unbuild | 2 +-
- opkg-update-index | 2 +-
- opkg.py | 2 +-
- 9 files changed, 9 insertions(+), 9 deletions(-)
-
-diff --git a/makePackage b/makePackage
-index 4bdfc56..02124dd 100755
---- a/makePackage
-+++ b/makePackage
-@@ -1,4 +1,4 @@
--#!/usr/bin/python
-+#!/usr/bin/env python3
-
- # The general algorithm this program follows goes like this:
- # Run tar to extract control from control.tar.gz from the package.
-diff --git a/opkg-compare-indexes b/opkg-compare-indexes
-index b60d20a..80c1263 100755
---- a/opkg-compare-indexes
-+++ b/opkg-compare-indexes
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
- from __future__ import print_function
-
-diff --git a/opkg-graph-deps b/opkg-graph-deps
-index 6653fd5..f1e376a 100755
---- a/opkg-graph-deps
-+++ b/opkg-graph-deps
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
- from __future__ import print_function
-
-diff --git a/opkg-list-fields b/opkg-list-fields
-index c14a90f..24f7955 100755
---- a/opkg-list-fields
-+++ b/opkg-list-fields
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
- from __future__ import print_function
-
-diff --git a/opkg-make-index b/opkg-make-index
-index 3f757f6..2988f9f 100755
---- a/opkg-make-index
-+++ b/opkg-make-index
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- """
- Utility to create opkg compatible indexes
- """
-
-diff --git a/opkg-show-deps b/opkg-show-deps
-index 153f21e..4e18b4f 100755
---- a/opkg-show-deps
-+++ b/opkg-show-deps
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
- from __future__ import print_function
-
-diff --git a/opkg-unbuild b/opkg-unbuild
-index 4f36bec..57642c9 100755
---- a/opkg-unbuild
-+++ b/opkg-unbuild
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
- from __future__ import print_function
-
-diff --git a/opkg-update-index b/opkg-update-index
-index 341c1c2..7bff8a1 100755
---- a/opkg-update-index
-+++ b/opkg-update-index
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- from __future__ import absolute_import
-
- import sys, os
-diff --git a/opkg.py b/opkg.py
-index 2ecac8a..7e64de4 100644
---- a/opkg.py
-+++ b/opkg.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- # Copyright (C) 2001 Alexander S. Guy <a7r@andern.org>
- # Andern Research Labs
- #
---
-2.11.0
-
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils/0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch b/meta/recipes-devtools/opkg-utils/opkg-utils/0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch
deleted file mode 100644
index a181169d47..0000000000
--- a/meta/recipes-devtools/opkg-utils/opkg-utils/0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-From 59da5577bf8df441c6ca958e50fcb83228702764 Mon Sep 17 00:00:00 2001
-From: Alejandro del Castillo <alejandro.delcastillo@ni.com>
-Date: Thu, 12 Sep 2019 10:24:58 -0500
-Subject: [PATCH] opkg-build: clamp mtimes to SOURCE_DATE_EPOCH
-
-For reproducible builds, clamp mtimes bigger than SOURCE_DATE_EPOCH to
-SOURCE_DATE_EPOCH (build generated files, usually).
-
-Fixes bugzilla 13450
-
-Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
-Signed-off-by: Ross Burton <ross.burton@intel.com>
----
- opkg-build | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/opkg-build b/opkg-build
-index dcd2d68..2517a2b 100755
---- a/opkg-build
-+++ b/opkg-build
-@@ -297,9 +297,16 @@ mkdir $tmp_dir
-
- build_date="${SOURCE_DATE_EPOCH:-$(date +%s)}"
-
-+mtime_args=""
-+# --clamp-mtime requires tar > 1.28. Only use it if SOURCE_DATE_EPOCH is set, to avoid having a generic case dependency on tar > 1.28.
-+# this setting will make sure files generated at build time have consistent mtimes, for reproducible builds.
-+if [ ! -z "$SOURCE_DATE_EPOCH" ]; then
-+ mtime_args="--mtime=@$build_date --clamp-mtime"
-+fi
-+
- ( cd $pkg_dir/$CONTROL && find . -type f > $tmp_dir/control_list )
- ( cd $pkg_dir && find . -path ./$CONTROL -prune -o -print > $tmp_dir/file_list )
--( cd $pkg_dir && tar $ogargs $tsortargs --no-recursion -c $tarformat -T $tmp_dir/file_list | $compressor $compressorargs > $tmp_dir/data.tar.$cext )
-+( cd $pkg_dir && tar $ogargs $tsortargs --no-recursion $mtime_args -c $tarformat -T $tmp_dir/file_list | $compressor $compressorargs > $tmp_dir/data.tar.$cext )
- ( cd $pkg_dir/$CONTROL && tar $ogargs $tsortargs --no-recursion --mtime=@$build_date -c $tarformat -T $tmp_dir/control_list | gzip $zipargs > $tmp_dir/control.tar.gz )
- rm $tmp_dir/file_list
- rm $tmp_dir/control_list
---
-2.20.1
-
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils/fix-reproducibility.patch b/meta/recipes-devtools/opkg-utils/opkg-utils/fix-reproducibility.patch
new file mode 100644
index 0000000000..945979bc8a
--- /dev/null
+++ b/meta/recipes-devtools/opkg-utils/opkg-utils/fix-reproducibility.patch
@@ -0,0 +1,32 @@
+Fix reproducibility issues in opkg-build
+
+There is a sorting problem with opkg-build where the generated ipk depends
+upon the order of files on disk. The reason is that the --sort option to tar
+only influences the order of files tar reads itself, not those passed via the -T option.
+
+Add in a sort call to resolve this issue. To ensure consistent sorting we
+also need to force a specific locale (C), otherwise the results are still
+not deterministic.
+
+RP 2020/2/5
+
+Upstream-Status: Submitted [https://groups.google.com/forum/#!topic/opkg-devel/YttZ73NLrYQ]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: opkg-utils-0.4.2/opkg-build
+===================================================================
+--- opkg-utils-0.4.2.orig/opkg-build
++++ opkg-utils-0.4.2/opkg-build
+@@ -305,8 +305,10 @@ if [ ! -z "$SOURCE_DATE_EPOCH" ]; then
+ mtime_args="--mtime=@$build_date --clamp-mtime"
+ fi
+
+-( cd $pkg_dir/$CONTROL && find . -type f > $tmp_dir/control_list )
+-( cd $pkg_dir && find . -path ./$CONTROL -prune -o -path . -o -print > $tmp_dir/file_list )
++export LANG=C
++export LC_ALL=C
++( cd $pkg_dir/$CONTROL && find . -type f | sort > $tmp_dir/control_list )
++( cd $pkg_dir && find . -path ./$CONTROL -prune -o -path . -o -print | sort > $tmp_dir/file_list )
+ ( cd $pkg_dir && tar $ogargs $tsortargs --no-recursion $mtime_args -c $tarformat -T $tmp_dir/file_list | $compressor $compressorargs > $tmp_dir/data.tar.$cext )
+ ( cd $pkg_dir/$CONTROL && tar $ogargs $tsortargs --no-recursion --mtime=@$build_date -c $tarformat -T $tmp_dir/control_list | gzip $zipargs > $tmp_dir/control.tar.gz )
+ rm $tmp_dir/file_list
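The locale pinning matters because sort(1) collates according to the current locale (strcoll-style) rather than by raw bytes. A self-contained C illustration, not part of opkg-build, of how the two orderings can disagree; the second result depends on which locales the build host has installed, which is exactly the nondeterminism being avoided:

    /* strcoll() collates per the current locale, strcmp() compares raw bytes. */
    #include <locale.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *a = "File2", *b = "file1";

        setlocale(LC_COLLATE, "C");
        printf("C locale:    strcoll<0=%d strcmp<0=%d\n",
               strcoll(a, b) < 0, strcmp(a, b) < 0);

        /* The next result varies with the host's installed locales. */
        if (setlocale(LC_COLLATE, "en_US.UTF-8"))
            printf("en_US.UTF-8: strcoll<0=%d strcmp<0=%d\n",
                   strcoll(a, b) < 0, strcmp(a, b) < 0);
        return 0;
    }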
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils/pipefail.patch b/meta/recipes-devtools/opkg-utils/opkg-utils/pipefail.patch
deleted file mode 100644
index 55ddcc1fd2..0000000000
--- a/meta/recipes-devtools/opkg-utils/opkg-utils/pipefail.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-We need opkg-build to fail if for example the tar command is passed invalid
-options. Without this, we see silently created empty packaged where data.tar
-is zero bytes in size. This creates hard to debug problems.
-
-An example is when reproducible builds are enabled and run on old hosts like
-centos7 which has tar < 1.28:
-
-Subprocess output:tar: unrecognized option '--clamp-mtime'
-Try `tar --help' or `tar --usage' for more information.
-
-Upstream-Status: Pending
-Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-
-Index: opkg-utils-0.4.1/opkg-build
-===================================================================
---- opkg-utils-0.4.1.orig/opkg-build
-+++ opkg-utils-0.4.1/opkg-build
-@@ -1,4 +1,4 @@
--#!/bin/sh
-+#!/bin/bash
-
- : <<=cut
- =head1 NAME
-@@ -12,6 +12,7 @@ opkg-build - construct an .opk from a di
- # Updated to work on Familiar Pre0.7rc1, with busybox tar.
- # Note it Requires: binutils-ar (since the busybox ar can't create)
- set -e
-+set -o pipefail
-
- version=1.0
-
diff --git a/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.1.bb b/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb
index eb6c7a3a6a..042eec7e0e 100644
--- a/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.1.bb
+++ b/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb
@@ -4,19 +4,16 @@ SECTION = "base"
HOMEPAGE = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
- file://opkg.py;beginline=2;endline=18;md5=63ce9e6bcc445181cd9e4baf4b4ccc35"
+ file://opkg.py;beginline=2;endline=18;md5=ffa11ff3c15eb31c6a7ceaa00cc9f986"
PROVIDES += "${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'virtual/update-alternatives', '', d)}"
-SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \
- file://0001-Switch-all-scripts-to-use-Python-3.x.patch \
- file://0001-opkg-build-clamp-mtimes-to-SOURCE_DATE_EPOCH.patch \
- file://pipefail.patch \
+SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \
+ file://fix-reproducibility.patch \
"
UPSTREAM_CHECK_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils/refs/"
-
-SRC_URI[md5sum] = "8c140f835b694a0c27cfb23d2426a02b"
-SRC_URI[sha256sum] = "9ea9efdd9fe13661ad251e3a2860c1c93045adcfaa6659c3e86d9748ecda3b6e"
+SRC_URI[md5sum] = "cc210650644fcb9bba06ad5ec95a63ec"
+SRC_URI[sha256sum] = "5929ad87d541789e0b82d626db01a1201ac48df6f49f2262fcfb86cf815e5d6c"
TARGET_CC_ARCH += "${LDFLAGS}"
diff --git a/meta/recipes-devtools/patch/patch_2.7.6.bb b/meta/recipes-devtools/patch/patch_2.7.6.bb
index 5d7f55f8dc..b5897b357a 100644
--- a/meta/recipes-devtools/patch/patch_2.7.6.bb
+++ b/meta/recipes-devtools/patch/patch_2.7.6.bb
@@ -22,3 +22,6 @@ acpaths = "-I ${S}/m4 "
PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'xattr', d)}"
PACKAGECONFIG[xattr] = "--enable-xattr,--disable-xattr,attr,"
+PROVIDES_append_class-native = " patch-replacement-native"
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-devtools/patchelf/patchelf/fix-phdrs.patch b/meta/recipes-devtools/patchelf/patchelf/fix-phdrs.patch
new file mode 100644
index 0000000000..d087bd7855
--- /dev/null
+++ b/meta/recipes-devtools/patchelf/patchelf/fix-phdrs.patch
@@ -0,0 +1,37 @@
+When running patchelf on some existing patchelf'd binaries to change to longer
+RPATHs, ldd would report the binaries as invalid. The output of objdump -x on
+those libraries showed the top of the .dynamic section was getting trashed,
+something like:
+
+0x600000001 0x0000000000429000
+0x335000 0x0000000000335000
+0xc740 0x000000000000c740
+0x1000 0x0000000000009098
+SONAME libglib-2.0.so.0
+
+(which should be RPATH and DT_NEEDED entries)
+
+This was tracked down to the code which injects the PT_LOAD section.
+
+The issue is that if the program headers were previously relocated to the end
+of the file (which is how older versions of patchelf operated), the relocation
+code wouldn't work properly on a second run, as it now assumes they're located
+after the ELF header. This change forces them back to immediately follow the
+ELF header, which is where the code has made space for them.
+
+Upstream-Status: Submitted [https://github.com/NixOS/patchelf/pull/202]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+RP 2020/6/2
+
+Index: git/src/patchelf.cc
+===================================================================
+--- git.orig/src/patchelf.cc
++++ git/src/patchelf.cc
+@@ -762,6 +762,7 @@ void ElfFile<ElfFileParamNames>::rewrite
+ }
+
+ /* Add a segment that maps the replaced sections into memory. */
++ wri(hdr->e_phoff, sizeof(Elf_Ehdr));
+ phdrs.resize(rdi(hdr->e_phnum) + 1);
+ wri(hdr->e_phnum, rdi(hdr->e_phnum) + 1);
+ Elf_Phdr & phdr = phdrs[rdi(hdr->e_phnum) - 1];
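The one-line fix forces e_phoff back to sizeof(Elf_Ehdr), i.e. the program headers must directly follow the ELF header. A standalone checker sketch (an illustration, not patchelf code; it only handles ELFCLASS64 files read on a same-endian host) reporting which of the two layouts a binary currently uses:

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
            return 1;
        }
        FILE *f = fopen(argv[1], "rb");
        if (!f) { perror("fopen"); return 1; }

        Elf64_Ehdr ehdr;
        if (fread(&ehdr, sizeof(ehdr), 1, f) != 1 ||
            memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
            fprintf(stderr, "not a readable ELF file\n");
            fclose(f);
            return 1;
        }

        printf("e_phoff = %llu, sizeof(Elf64_Ehdr) = %zu -> %s\n",
               (unsigned long long)ehdr.e_phoff, sizeof(Elf64_Ehdr),
               ehdr.e_phoff == sizeof(Elf64_Ehdr)
                   ? "program headers follow the ELF header"
                   : "program headers have been relocated");
        fclose(f);
        return 0;
    }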
diff --git a/meta/recipes-devtools/patchelf/patchelf_0.10.bb b/meta/recipes-devtools/patchelf/patchelf_0.10.bb
index cc983e033a..e4a604ec70 100644
--- a/meta/recipes-devtools/patchelf/patchelf_0.10.bb
+++ b/meta/recipes-devtools/patchelf/patchelf_0.10.bb
@@ -1,6 +1,7 @@
SRC_URI = "https://nixos.org/releases/${BPN}/${BPN}-${PV}/${BPN}-${PV}.tar.bz2 \
file://handle-read-only-files.patch \
file://fix-adjusting-startPage.patch \
+ file://fix-phdrs.patch \
"
LICENSE = "GPLv3"
diff --git a/meta/recipes-devtools/perl/files/0001-tests-adjust-to-correctly-exclude-unbuilt-extensions.patch b/meta/recipes-devtools/perl/files/0001-tests-adjust-to-correctly-exclude-unbuilt-extensions.patch
new file mode 100644
index 0000000000..0f3a2c6327
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/0001-tests-adjust-to-correctly-exclude-unbuilt-extensions.patch
@@ -0,0 +1,27 @@
+From b0d53cfd785f64002128ac5eecc4aed0663d9c30 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 9 Jan 2020 17:26:55 +0100
+Subject: [PATCH] tests: adjust to correctly exclude unbuilt extensions
+
+Issue is reported here:
+https://github.com/arsv/perl-cross/issues/85
+
+Upstream-Status: Inappropriate [issue caused by perl-cross]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ t/TEST | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/t/TEST b/t/TEST
+index a9c844f..8d3505f 100755
+--- a/t/TEST
++++ b/t/TEST
+@@ -419,7 +419,7 @@ sub _tests_from_manifest {
+ while (<MANI>) {
+ if (m!^((?:cpan|dist|ext)/(\S+)/+(?:[^/\s]+\.t|test\.pl)|lib/\S+?(?:\.t|test\.pl))\s!) {
+ my $t = $1;
+- my $extension = $2;
++ my $extension = $1."/".$2;
+
+ # XXX Generates way too many error lines currently. Skip for
+ # v5.22
diff --git a/meta/recipes-devtools/perl/files/CVE-2020-10543.patch b/meta/recipes-devtools/perl/files/CVE-2020-10543.patch
new file mode 100644
index 0000000000..36dff0aac9
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2020-10543.patch
@@ -0,0 +1,36 @@
+From 897d1f7fd515b828e4b198d8b8bef76c6faf03ed Mon Sep 17 00:00:00 2001
+From: John Lightsey <jd@cpanel.net>
+Date: Wed, 20 Nov 2019 20:02:45 -0600
+Subject: [PATCH] regcomp.c: Prevent integer overflow from nested regex
+ quantifiers.
+
+(CVE-2020-10543) On 32-bit systems the size calculations for nested regular
+expression quantifiers could overflow, causing heap memory corruption.
+
+Fixes: Perl/perl5-security#125
+(cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71)
+
+Upstream-Status: Backport [https://github.com/perl/perl5/commit/897d1f7fd515b828e4b198d8b8bef76c6faf03ed]
+CVE: CVE-2020-10543
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ regcomp.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/regcomp.c b/regcomp.c
+index 93c8d98fbb0..5f86be8086d 100644
+--- a/regcomp.c
++++ b/regcomp.c
+@@ -5489,6 +5489,12 @@ S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
+ RExC_precomp)));
+ }
+
++ if ( ( minnext > 0 && mincount >= SSize_t_MAX / minnext )
++ || min >= SSize_t_MAX - minnext * mincount )
++ {
++ FAIL("Regexp out of space");
++ }
++
+ min += minnext * mincount;
+ is_inf_internal |= deltanext == SSize_t_MAX
+ || (maxcount == REG_INFTY && minnext + deltanext > 0);
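The guard added above is the standard "check before you multiply and add" pattern. A minimal, self-contained version using plain size_t instead of perl's SSize_t types (the function and variable names are illustrative only, not taken from regcomp.c):

    #include <stdint.h>
    #include <stdio.h>

    /* Stores min + minnext * mincount and returns 0 on success,
     * or returns -1 if the computation would exceed SIZE_MAX. */
    static int checked_accumulate(size_t min, size_t minnext, size_t mincount,
                                  size_t *out)
    {
        if (minnext > 0 && mincount > SIZE_MAX / minnext)
            return -1;                 /* minnext * mincount would overflow */
        if (min > SIZE_MAX - minnext * mincount)
            return -1;                 /* the addition would overflow */
        *out = min + minnext * mincount;
        return 0;
    }

    int main(void)
    {
        size_t r;
        printf("small case ok (0 expected): %d\n",
               checked_accumulate(10, 3, 4, &r));
        printf("overflow detected (-1 expected): %d\n",
               checked_accumulate(1, SIZE_MAX / 2, 3, &r));
        return 0;
    }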
diff --git a/meta/recipes-devtools/perl/files/CVE-2020-10878_1.patch b/meta/recipes-devtools/perl/files/CVE-2020-10878_1.patch
new file mode 100644
index 0000000000..b86085a551
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2020-10878_1.patch
@@ -0,0 +1,152 @@
+From 0a320d753fe7fca03df259a4dfd8e641e51edaa8 Mon Sep 17 00:00:00 2001
+From: Hugo van der Sanden <hv@crypt.org>
+Date: Tue, 18 Feb 2020 13:51:16 +0000
+Subject: [PATCH] study_chunk: extract rck_elide_nothing
+
+(CVE-2020-10878)
+
+(cherry picked from commit 93dee06613d4e1428fb10905ce1c3c96f53113dc)
+
+Upstream-Status: Backport [https://github.com/perl/perl5/commit/0a320d753fe7fca03df259a4dfd8e641e51edaa8]
+CVE: CVE-2020-10878
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ embed.fnc | 1 +
+ embed.h | 1 +
+ proto.h | 3 +++
+ regcomp.c | 70 ++++++++++++++++++++++++++++++++++---------------------
+ 4 files changed, 48 insertions(+), 27 deletions(-)
+
+diff --git a/embed.fnc b/embed.fnc
+index aedb4baef19..d7cd04d3fc3 100644
+--- a/embed.fnc
++++ b/embed.fnc
+@@ -2481,6 +2481,7 @@ Es |SSize_t|study_chunk |NN RExC_state_t *pRExC_state \
+ |I32 stopparen|U32 recursed_depth \
+ |NULLOK regnode_ssc *and_withp \
+ |U32 flags|U32 depth
++Es |void |rck_elide_nothing|NN regnode *node
+ EsR |SV * |get_ANYOFM_contents|NN const regnode * n
+ EsRn |U32 |add_data |NN RExC_state_t* const pRExC_state \
+ |NN const char* const s|const U32 n
+diff --git a/embed.h b/embed.h
+index 75c91f77f45..356a8b98d96 100644
+--- a/embed.h
++++ b/embed.h
+@@ -1208,6 +1208,7 @@
+ #define parse_lparen_question_flags(a) S_parse_lparen_question_flags(aTHX_ a)
+ #define parse_uniprop_string(a,b,c,d,e,f,g,h,i) Perl_parse_uniprop_string(aTHX_ a,b,c,d,e,f,g,h,i)
+ #define populate_ANYOF_from_invlist(a,b) S_populate_ANYOF_from_invlist(aTHX_ a,b)
++#define rck_elide_nothing(a) S_rck_elide_nothing(aTHX_ a)
+ #define reg(a,b,c,d) S_reg(aTHX_ a,b,c,d)
+ #define reg2Lanode(a,b,c,d) S_reg2Lanode(aTHX_ a,b,c,d)
+ #define reg_node(a,b) S_reg_node(aTHX_ a,b)
+diff --git a/proto.h b/proto.h
+index 141ddbaee6d..f316fe134e1 100644
+--- a/proto.h
++++ b/proto.h
+@@ -5543,6 +5543,9 @@ PERL_CALLCONV SV * Perl_parse_uniprop_string(pTHX_ const char * const name, cons
+ STATIC void S_populate_ANYOF_from_invlist(pTHX_ regnode *node, SV** invlist_ptr);
+ #define PERL_ARGS_ASSERT_POPULATE_ANYOF_FROM_INVLIST \
+ assert(node); assert(invlist_ptr)
++STATIC void S_rck_elide_nothing(pTHX_ regnode *node);
++#define PERL_ARGS_ASSERT_RCK_ELIDE_NOTHING \
++ assert(node)
+ PERL_STATIC_NO_RET void S_re_croak2(pTHX_ bool utf8, const char* pat1, const char* pat2, ...)
+ __attribute__noreturn__;
+ #define PERL_ARGS_ASSERT_RE_CROAK2 \
+diff --git a/regcomp.c b/regcomp.c
+index 5f86be8086d..4ba2980db66 100644
+--- a/regcomp.c
++++ b/regcomp.c
+@@ -4450,6 +4450,44 @@ S_unwind_scan_frames(pTHX_ const void *p)
+ } while (f);
+ }
+
++/* Follow the next-chain of the current node and optimize away
++ all the NOTHINGs from it.
++ */
++STATIC void
++S_rck_elide_nothing(pTHX_ regnode *node)
++{
++ dVAR;
++
++ PERL_ARGS_ASSERT_RCK_ELIDE_NOTHING;
++
++ if (OP(node) != CURLYX) {
++ const int max = (reg_off_by_arg[OP(node)]
++ ? I32_MAX
++ /* I32 may be smaller than U16 on CRAYs! */
++ : (I32_MAX < U16_MAX ? I32_MAX : U16_MAX));
++ int off = (reg_off_by_arg[OP(node)] ? ARG(node) : NEXT_OFF(node));
++ int noff;
++ regnode *n = node;
++
++ /* Skip NOTHING and LONGJMP. */
++ while (
++ (n = regnext(n))
++ && (
++ (PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n)))
++ || ((OP(n) == LONGJMP) && (noff = ARG(n)))
++ )
++ && off + noff < max
++ ) {
++ off += noff;
++ }
++ if (reg_off_by_arg[OP(node)])
++ ARG(node) = off;
++ else
++ NEXT_OFF(node) = off;
++ }
++ return;
++}
++
+ /* the return from this sub is the minimum length that could possibly match */
+ STATIC SSize_t
+ S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
+@@ -4550,28 +4588,10 @@ S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
+ */
+ JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0);
+
+- /* Follow the next-chain of the current node and optimize
+- away all the NOTHINGs from it. */
+- if (OP(scan) != CURLYX) {
+- const int max = (reg_off_by_arg[OP(scan)]
+- ? I32_MAX
+- /* I32 may be smaller than U16 on CRAYs! */
+- : (I32_MAX < U16_MAX ? I32_MAX : U16_MAX));
+- int off = (reg_off_by_arg[OP(scan)] ? ARG(scan) : NEXT_OFF(scan));
+- int noff;
+- regnode *n = scan;
+-
+- /* Skip NOTHING and LONGJMP. */
+- while ((n = regnext(n))
+- && ((PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n)))
+- || ((OP(n) == LONGJMP) && (noff = ARG(n))))
+- && off + noff < max)
+- off += noff;
+- if (reg_off_by_arg[OP(scan)])
+- ARG(scan) = off;
+- else
+- NEXT_OFF(scan) = off;
+- }
++ /* Follow the next-chain of the current node and optimize
++ away all the NOTHINGs from it.
++ */
++ rck_elide_nothing(scan);
+
+ /* The principal pseudo-switch. Cannot be a switch, since we
+ look into several different things. */
+@@ -5745,11 +5765,7 @@ Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n",
+ if (data && (fl & SF_HAS_EVAL))
+ data->flags |= SF_HAS_EVAL;
+ optimize_curly_tail:
+- if (OP(oscan) != CURLYX) {
+- while (PL_regkind[OP(next = regnext(oscan))] == NOTHING
+- && NEXT_OFF(next))
+- NEXT_OFF(oscan) += NEXT_OFF(next);
+- }
++ rck_elide_nothing(oscan);
+ continue;
+
+ default:
diff --git a/meta/recipes-devtools/perl/files/CVE-2020-10878_2.patch b/meta/recipes-devtools/perl/files/CVE-2020-10878_2.patch
new file mode 100644
index 0000000000..0bacd6b192
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2020-10878_2.patch
@@ -0,0 +1,36 @@
+From 3295b48defa0f8570114877b063fe546dd348b3c Mon Sep 17 00:00:00 2001
+From: Karl Williamson <khw@cpan.org>
+Date: Thu, 20 Feb 2020 17:49:36 +0000
+Subject: [PATCH] regcomp: use long jumps if there is any possibility of
+ overflow
+
+(CVE-2020-10878) Be conservative for backporting; we'll aim to do
+something more aggressive for bleadperl.
+
+(cherry picked from commit 9d7759db46f3b31b1d3f79c44266b6ba42a47fc6)
+
+Upstream-Status: Backport [https://github.com/perl/perl5/commit/3295b48defa0f8570114877b063fe546dd348b3c]
+CVE: CVE-2020-10878
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ regcomp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/regcomp.c b/regcomp.c
+index 4ba2980db66..73c35a67020 100644
+--- a/regcomp.c
++++ b/regcomp.c
+@@ -7762,6 +7762,13 @@ Perl_re_op_compile(pTHX_ SV ** const patternp, int pat_count,
+
+ /* We have that number in RExC_npar */
+ RExC_total_parens = RExC_npar;
++
++ /* XXX For backporting, use long jumps if there is any possibility of
++ * overflow */
++ if (RExC_size > U16_MAX && ! RExC_use_BRANCHJ) {
++ RExC_use_BRANCHJ = TRUE;
++ flags |= RESTART_PARSE;
++ }
+ }
+ else if (! MUST_RESTART(flags)) {
+ ReREFCNT_dec(Rx);
diff --git a/meta/recipes-devtools/perl/files/determinism.patch b/meta/recipes-devtools/perl/files/determinism.patch
new file mode 100644
index 0000000000..ed4d06f5ec
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/determinism.patch
@@ -0,0 +1,81 @@
+Fixes to make the perl build reproducible:
+
+a) Remove the \n from configure_attr.sh since it gets quoted differently depending on
+ whether the shell is bash or dash which can cause the test result to be incorrect.
+ Reported upstream: https://github.com/arsv/perl-cross/issues/87
+
+b) Sort the order of the module lists from configure_mods.sh since otherwise
+   the result isn't deterministic, leading to makefile differences.
+ Reported upstream: https://github.com/arsv/perl-cross/issues/88
+
+c) Sort the Encode::Byte byte_t.fnm file output (and the makefile depends whilst
+ there for good measure)
+ This needs to go to upstream perl (not done)
+
+d) Use bash for perl-cross configure since otherwise trnl gets set to "\n" with bash
+ and "" with dash
+ Reported upstream: https://github.com/arsv/perl-cross/issues/87
+
+RP 2020/2/7
+
+Upstream-Status: Pending [75% submitted]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: perl-5.30.1/cnf/configure_attr.sh
+===================================================================
+--- perl-5.30.1.orig/cnf/configure_attr.sh
++++ perl-5.30.1/cnf/configure_attr.sh
+@@ -131,7 +131,7 @@ if not hinted d_c99_variadic_macros 'sup
+ try_start
+ try_add '#include <stdio.h>'
+ try_add '#define foo(fmt, ...) printf(fmt, __VA_ARGS__)'
+- try_add 'int main(void) { foo("%i\n", 1234); return 0; }'
++ try_add 'int main(void) { foo("%i", 1234); return 0; }'
+ try_compile
+ resdef d_c99_variadic_macros 'supported' 'missing'
+ fi
+Index: perl-5.30.1/cnf/configure_mods.sh
+===================================================================
+--- perl-5.30.1.orig/cnf/configure_mods.sh
++++ perl-5.30.1/cnf/configure_mods.sh
+@@ -82,7 +82,7 @@ extonlyif() {
+ }
+
+ definetrimspaces() {
+- v=`echo "$2" | sed -r -e 's/\s+/ /g' -e 's/^\s+//' -e 's/\s+$//'`
++ v=`echo "$2" | sed -r -e 's/\s+/ /g' -e 's/^\s+//' -e 's/\s+$//' | xargs -n1 | LANG=C sort | xargs`
+ define $1 "$v"
+ }
+
+Index: perl-5.30.1/cpan/Encode/Byte/Makefile.PL
+===================================================================
+--- perl-5.30.1.orig/cpan/Encode/Byte/Makefile.PL
++++ perl-5.30.1/cpan/Encode/Byte/Makefile.PL
+@@ -171,7 +171,7 @@ sub postamble
+ my $lengthsofar = length($str);
+ my $continuator = '';
+ $str .= "$table.c : $enc2xs Makefile.PL";
+- foreach my $file (@{$tables{$table}})
++ foreach my $file (sort (@{$tables{$table}}))
+ {
+ $str .= $continuator.' '.$self->catfile($dir,$file);
+ if ( length($str)-$lengthsofar > 128*$numlines )
+@@ -189,7 +189,7 @@ sub postamble
+ qq{\n\t\$(PERL) $plib $enc2xs $ucopts -o \$\@ -f $table.fnm\n\n};
+ open (FILELIST, ">$table.fnm")
+ || die "Could not open $table.fnm: $!";
+- foreach my $file (@{$tables{$table}})
++ foreach my $file (sort (@{$tables{$table}}))
+ {
+ print FILELIST $self->catfile($dir,$file) . "\n";
+ }
+Index: perl-5.30.1/cnf/configure
+===================================================================
+--- perl-5.30.1.orig/cnf/configure
++++ perl-5.30.1/cnf/configure
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+
+ base=${0%/*}; test -z "$base" && base=.
+
diff --git a/meta/recipes-devtools/perl/files/encodefix.patch b/meta/recipes-devtools/perl/files/encodefix.patch
new file mode 100644
index 0000000000..396ed0d53e
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/encodefix.patch
@@ -0,0 +1,20 @@
+The code is encoding host compiler parameters into target builds. Avoid
+this for our target builds (patch is target specific, not native)
+
+Upstream-Status: Inappropriate [Cross compile hack]
+RP 2020/2/18
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: perl-5.30.1/cpan/Encode/bin/enc2xs
+===================================================================
+--- perl-5.30.1.orig/cpan/Encode/bin/enc2xs
++++ perl-5.30.1/cpan/Encode/bin/enc2xs
+@@ -195,7 +195,7 @@ sub compiler_info {
+ # above becomes false.
+ my $sized = $declaration && !($compat && !$pedantic);
+
+- return ($cpp, $static, $sized);
++ return (0, 1, 1);
+ }
+
+
diff --git a/meta/recipes-devtools/perl/files/fix-setgroup.patch b/meta/recipes-devtools/perl/files/fix-setgroup.patch
deleted file mode 100644
index 2b490e6067..0000000000
--- a/meta/recipes-devtools/perl/files/fix-setgroup.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-Test script to reproduce the problem:
-
-#!/usr/bin/env perl
-$) = "2 2";
-print $!;
-
-Result from perl 5.28 under strace:
-
-setgroups(1, [2]) = 0
-setresgid(-1, 2, -1) = 0
-
-Result from perl 5.30 under strace:
-
-setgroups(1, [-1]) = -1 EINVAL (Invalid argument)
-setresgid(-1, 2, -1) = 0
-
-Patch which broke this upstream:
-https://perl5.git.perl.org/perl.git/commitdiff/5d4a52b5c68a11bfc97c2e24806993b84a61eade
-
-Issue is that the new function changes the endptr to the end of the
-scanned number and needs to be reset to the end of the string for
-each iteration of the loop.
-
-[YOCTO #13391]
-
-RP
-2019/6/14
-Upstream-Status: Pending
-
-Index: perl-5.30.0/mg.c
-===================================================================
---- perl-5.30.0.orig/mg.c
-+++ perl-5.30.0/mg.c
-@@ -3179,6 +3256,7 @@ Perl_magic_set(pTHX_ SV *sv, MAGIC *mg)
- const char *p = SvPV_const(sv, len);
- Groups_t *gary = NULL;
- const char* endptr = p + len;
-+ const char* realend = p + len;
- UV uv;
- #ifdef _SC_NGROUPS_MAX
- int maxgrp = sysconf(_SC_NGROUPS_MAX);
-@@ -3209,6 +3287,7 @@ Perl_magic_set(pTHX_ SV *sv, MAGIC *mg)
- Newx(gary, i + 1, Groups_t);
- else
- Renew(gary, i + 1, Groups_t);
-+ endptr = realend;
- if (grok_atoUV(p, &uv, &endptr))
- gary[i] = (Groups_t)uv;
- else {
diff --git a/meta/recipes-devtools/perl/files/perl-configpm-switch.patch b/meta/recipes-devtools/perl/files/perl-configpm-switch.patch
index 3c2cecb8c1..80ce4a6de7 100644
--- a/meta/recipes-devtools/perl/files/perl-configpm-switch.patch
+++ b/meta/recipes-devtools/perl/files/perl-configpm-switch.patch
@@ -1,4 +1,4 @@
-From 7f313cac31c55cbe62a4d0cdfa8321cc05a8eb3a Mon Sep 17 00:00:00 2001
+From 5120acaa2be5787d9657f6b91bc8ee3c2d664fbe Mon Sep 17 00:00:00 2001
From: Alexander Kanavin <alex.kanavin@gmail.com>
Date: Sun, 27 May 2007 21:04:11 +0000
Subject: [PATCH] perl: 5.8.7 -> 5.8.8 (from OE)
@@ -20,7 +20,7 @@ Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/configpm b/configpm
-index 09c4a3b..6a0a680 100755
+index c8de8bf..204613c 100755
--- a/configpm
+++ b/configpm
@@ -687,7 +687,7 @@ sub FETCH {
diff --git a/meta/recipes-devtools/perl/files/racefix.patch b/meta/recipes-devtools/perl/files/racefix.patch
new file mode 100644
index 0000000000..bac42d26ae
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/racefix.patch
@@ -0,0 +1,24 @@
+In our builds Config_heavy.pl sometimes has lines:
+cwarnflags=XXX
+ccstdflags=XXX
+and sometimes does not.
+The reason is that this information is pulled from cflags by configpm and yet
+there is no dependency in the Makefile. Add one to fix this.
+
+Upstream-Status: Submitted [https://github.com/arsv/perl-cross/pull/89]
+RP 2020/2/19
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: perl-5.30.1/Makefile
+===================================================================
+--- perl-5.30.1.orig/Makefile
++++ perl-5.30.1/Makefile
+@@ -204,7 +204,7 @@ configpod: $(CONFIGPOD)
+ git_version.h lib/Config_git.pl: make_patchnum.pl | miniperl$X
+ ./miniperl_top make_patchnum.pl
+
+-lib/Config.pm lib/Config_heavy.pl lib/Config.pod: config.sh \
++lib/Config.pm lib/Config_heavy.pl lib/Config.pod: config.sh cflags \
+ lib/Config_git.pl Porting/Glossary | miniperl$X
+ ./miniperl_top configpm
+
diff --git a/meta/recipes-devtools/perl/liberror-perl_0.17028.bb b/meta/recipes-devtools/perl/liberror-perl_0.17029.bb
index 8c6bbcba94..038808f0cd 100644
--- a/meta/recipes-devtools/perl/liberror-perl_0.17028.bb
+++ b/meta/recipes-devtools/perl/liberror-perl_0.17029.bb
@@ -32,8 +32,8 @@ RDEPENDS_${PN}-ptest += " \
SRC_URI = "http://cpan.metacpan.org/authors/id/S/SH/SHLOMIF/Error-${PV}.tar.gz"
-SRC_URI[md5sum] = "ec3522c60a43a368f19c0f89e2205cb1"
-SRC_URI[sha256sum] = "3ad85c5e58b31c8903006298424a51bba39f1840e324f5ae612eabc8b935e960"
+SRC_URI[md5sum] = "6732b1c6207e4a9a3e2987c88368039a"
+SRC_URI[sha256sum] = "1a23f7913032aed6d4b68321373a3899ca66590f4727391a091ec19c95bf7adc"
S = "${WORKDIR}/Error-${PV}"
diff --git a/meta/recipes-devtools/perl/libmodule-build-perl/run-ptest b/meta/recipes-devtools/perl/libmodule-build-perl/run-ptest
index 0d63d1513b..d802781f9e 100644
--- a/meta/recipes-devtools/perl/libmodule-build-perl/run-ptest
+++ b/meta/recipes-devtools/perl/libmodule-build-perl/run-ptest
@@ -6,8 +6,6 @@ for case in `find t -type f -name '*.t'`; do
cat $case.output
if [ $ret -ne 0 ]; then
echo "FAIL: ${case%.t}"
- elif grep -i 'SKIP' $case.output; then
- echo "SKIP: ${case%.t}"
else
echo "PASS: ${case%.t}"
fi
diff --git a/meta/recipes-devtools/perl/libmodule-build-perl_0.4229.bb b/meta/recipes-devtools/perl/libmodule-build-perl_0.4229.bb
index f759f862fb..e3ba40d96c 100644
--- a/meta/recipes-devtools/perl/libmodule-build-perl_0.4229.bb
+++ b/meta/recipes-devtools/perl/libmodule-build-perl_0.4229.bb
@@ -36,7 +36,10 @@ do_patch[postfuncs] += "do_patch_module_build"
do_install_ptest() {
cp -r ${B}/inc ${D}${PTEST_PATH}
cp -r ${B}/blib ${D}${PTEST_PATH}
+ cp -r ${B}/_build ${D}${PTEST_PATH}
+ cp -r ${B}/lib ${D}${PTEST_PATH}
chown -R root:root ${D}${PTEST_PATH}
+ sed -i -e "s,'perl' => .*,'perl' => '/usr/bin/perl'\,,g" ${D}${PTEST_PATH}/_build/build_params
}
RDEPENDS_${PN} += " \
diff --git a/meta/recipes-devtools/perl/perl-ptest.inc b/meta/recipes-devtools/perl/perl-ptest.inc
index 7152057762..98e3361fcc 100644
--- a/meta/recipes-devtools/perl/perl-ptest.inc
+++ b/meta/recipes-devtools/perl/perl-ptest.inc
@@ -42,6 +42,9 @@ do_install_ptest () {
# Remove a useless timestamp...
sed -i -e '/Autogenerated starting on/d' ${D}${PTEST_PATH}/lib/unicore/mktables.lst
+
+ # Remove files with host-specific configuration for building native binaries
+ rm ${D}${PTEST_PATH}/Makefile.config ${D}${PTEST_PATH}/xconfig.h ${D}${PTEST_PATH}/xconfig.sh
}
python populate_packages_prepend() {
diff --git a/meta/recipes-devtools/perl/perl_5.30.0.bb b/meta/recipes-devtools/perl/perl_5.30.1.bb
index ba2a8437d4..b633acfcea 100644
--- a/meta/recipes-devtools/perl/perl_5.30.0.bb
+++ b/meta/recipes-devtools/perl/perl_5.30.1.bb
@@ -8,7 +8,7 @@ LIC_FILES_CHKSUM = "file://Copying;md5=5b122a36d0f6dc55279a0ebc69f3c60b \
SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \
- https://github.com/arsv/perl-cross/releases/download/1.3/perl-cross-1.3.tar.gz;name=perl-cross \
+ https://github.com/arsv/perl-cross/releases/download/1.3.1/perl-cross-1.3.1.tar.gz;name=perl-cross \
file://perl-rdepends.txt \
file://0001-configure_tool.sh-do-not-quote-the-argument-to-comma.patch \
file://0001-ExtUtils-MakeMaker-add-LDFLAGS-when-linking-binary-m.patch \
@@ -18,18 +18,26 @@ SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \
file://0001-perl-cross-add-LDFLAGS-when-linking-libperl.patch \
file://perl-dynloader.patch \
file://0001-configure_path.sh-do-not-hardcode-prefix-lib-as-libr.patch \
- file://fix-setgroup.patch \
file://0001-enc2xs-Add-environment-variable-to-suppress-comments.patch \
file://0002-Constant-Fix-up-shebang.patch \
+ file://0001-tests-adjust-to-correctly-exclude-unbuilt-extensions.patch \
+ file://determinism.patch \
+ file://racefix.patch \
+ file://CVE-2020-10543.patch \
+ file://CVE-2020-10878_1.patch \
+ file://CVE-2020-10878_2.patch \
"
SRC_URI_append_class-native = " \
file://perl-configpm-switch.patch \
"
+SRC_URI_append_class-target = " \
+ file://encodefix.patch \
+"
-SRC_URI[perl.md5sum] = "9770584cdf9b5631c38097645ce33549"
-SRC_URI[perl.sha256sum] = "851213c754d98ccff042caa40ba7a796b2cee88c5325f121be5cbb61bbf975f2"
-SRC_URI[perl-cross.md5sum] = "4dda3daf9c4fe42b3d6a5dd052852a48"
-SRC_URI[perl-cross.sha256sum] = "49edea1ea2cd6c5c47386ca71beda8d150c748835781354dbe7f75b1df27e703"
+SRC_URI[perl.md5sum] = "6438eb7b8db9bbde28e01086de376a46"
+SRC_URI[perl.sha256sum] = "bf3d25571ff1ee94186177c2cdef87867fd6a14aa5a84f0b1fb7bf798f42f964"
+SRC_URI[perl-cross.md5sum] = "1e463b105cfa56d251a86979af23e3a7"
+SRC_URI[perl-cross.sha256sum] = "edce0b0c2f725e2db3f203d6d8e9f3f7161256f5d1590551e40694f21200141d"
S = "${WORKDIR}/perl-${PV}"
@@ -112,6 +120,14 @@ print(datetime.fromtimestamp($SOURCE_DATE_EPOCH, timezone.utc).strftime('%a %b %
do_compile() {
oe_runmake
+ # This isn't generated reliably so delete and re-generate.
+ # https://github.com/arsv/perl-cross/issues/86
+
+ if [ -e pod/perltoc.pod ]; then
+ bbnote Rebuilding perltoc.pod
+ rm -f pod/perltoc.pod
+ oe_runmake pod/perltoc.pod
+ fi
}
do_install() {
@@ -135,6 +151,9 @@ do_install_append_class-target() {
# This is used to substitute target configuration when running native perl via perl-configpm-switch.patch
ln -s Config_heavy.pl ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy-target.pl
+ # This contains host-specific information used for building miniperl (a helper executable built with host compiler)
+ # and therefore isn't reproducible. I believe the file isn't actually needed on target.
+ rm ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/xconfig.h
}
do_install_append_class-nativesdk() {
@@ -198,6 +217,7 @@ require perl-ptest.inc
FILES_${PN} = "${bindir}/perl ${bindir}/perl.real ${bindir}/perl${PV} ${libdir}/libperl.so* \
${libdir}/perl5/site_perl \
${libdir}/perl5/${PV}/Config.pm \
+ ${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pm \
${libdir}/perl5/${PV}/*/Config_git.pl \
${libdir}/perl5/${PV}/*/Config_heavy-target.pl \
${libdir}/perl5/config.sh \
@@ -206,6 +226,9 @@ FILES_${PN} = "${bindir}/perl ${bindir}/perl.real ${bindir}/perl${PV} ${libdir}/
${libdir}/perl5/${PV}/warnings \
${libdir}/perl5/${PV}/vars.pm \
${libdir}/perl5/site_perl \
+ ${libdir}/perl5/${PV}/ExtUtils/MANIFEST.SKIP \
+ ${libdir}/perl5/${PV}/ExtUtils/xsubpp \
+ ${libdir}/perl5/${PV}/ExtUtils/typemap \
"
RPROVIDES_${PN} += "perl-module-strict perl-module-vars perl-module-config perl-module-warnings \
perl-module-warnings-register"
@@ -216,9 +239,6 @@ FILES_${PN}-dev_append = " ${libdir}/perl5/${PV}/*/CORE"
FILES_${PN}-doc_append = " ${libdir}/perl5/${PV}/Unicode/Collate/*.txt \
${libdir}/perl5/${PV}/*/.packlist \
- ${libdir}/perl5/${PV}/ExtUtils/MANIFEST.SKIP \
- ${libdir}/perl5/${PV}/ExtUtils/xsubpp \
- ${libdir}/perl5/${PV}/ExtUtils/typemap \
${libdir}/perl5/${PV}/Encode/encode.h \
"
PACKAGES += "${PN}-misc"
@@ -254,7 +274,7 @@ python split_perl_packages () {
do_split_packages(d, libdir, r'Module/([^\/]*)\.pm', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False)
do_split_packages(d, libdir, r'Module/([^\/]*)/.*', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False)
do_split_packages(d, libdir, r'.*linux/([^\/].*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False)
- do_split_packages(d, libdir, r'(^(?!(CPAN\/|CPANPLUS\/|Module\/|unicore\/)[^\/]).*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False)
+ do_split_packages(d, libdir, r'(^(?!(CPAN\/|CPANPLUS\/|Module\/|unicore\/|.*linux\/)[^\/]).*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False)
# perl-modules should recommend every perl module, and only the
# modules. Don't attempt to use the result of do_split_packages() as some
diff --git a/meta/recipes-devtools/pseudo/pseudo.inc b/meta/recipes-devtools/pseudo/pseudo.inc
index 7ff8e449e9..50e30064bd 100644
--- a/meta/recipes-devtools/pseudo/pseudo.inc
+++ b/meta/recipes-devtools/pseudo/pseudo.inc
@@ -16,6 +16,7 @@ INSANE_SKIP_${PN}-dbg += "libdir"
PROVIDES += "virtual/fakeroot"
MAKEOPTS = ""
+MAKEOPTS_class-native = "'RPATH=-Wl,--rpath=XORIGIN/../../../sqlite3-native/usr/lib/'"
inherit siteinfo pkgconfig
@@ -115,6 +116,7 @@ do_install () {
}
do_install_append_class-native () {
+ chrpath ${D}${bindir}/pseudo -r `chrpath ${D}${bindir}/pseudo | cut -d = -f 2 | sed s/XORIGIN/\\$ORIGIN/`
install -d ${D}${sysconfdir}
# The fallback files should never be modified
install -m 444 ${WORKDIR}/fallback-passwd ${D}${sysconfdir}/passwd
diff --git a/meta/recipes-devtools/python-numpy/files/aarch64/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/aarch64/_numpyconfig.h
deleted file mode 100644
index 109deb0435..0000000000
--- a/meta/recipes-devtools/python-numpy/files/aarch64/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/aarch64/config.h b/meta/recipes-devtools/python-numpy/files/aarch64/config.h
deleted file mode 100644
index c30b868f2f..0000000000
--- a/meta/recipes-devtools/python-numpy/files/aarch64/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/arm/config.h b/meta/recipes-devtools/python-numpy/files/arm/config.h
deleted file mode 100644
index 17ef186d56..0000000000
--- a/meta/recipes-devtools/python-numpy/files/arm/config.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* ./src.linux-i686-2.5/numpy/core/include/numpy/config.h */
-/* #define SIZEOF_SHORT 2 */
-/* #define SIZEOF_INT 4 */
-/* #define SIZEOF_LONG 4 */
-/* #define SIZEOF_FLOAT 4 */
-/* #define SIZEOF_DOUBLE 8 */
-#define SIZEOF_LONG_DOUBLE 12
-#define SIZEOF_PY_INTPTR_T 4
-/* #define SIZEOF_LONG_LONG 8 */
-#define SIZEOF_PY_LONG_LONG 8
-/* #define CHAR_BIT 8 */
-#define MATHLIB m
-#define HAVE_FLOAT_FUNCS
-#define HAVE_LOG1P
-#define HAVE_EXPM1
-#define HAVE_INVERSE_HYPERBOLIC
-#define HAVE_INVERSE_HYPERBOLIC_FLOAT
-#define HAVE_ISNAN
-#define HAVE_ISINF
-#define HAVE_RINT
-
diff --git a/meta/recipes-devtools/python-numpy/files/arm/numpyconfig.h b/meta/recipes-devtools/python-numpy/files/arm/numpyconfig.h
deleted file mode 100644
index c4bf6547f0..0000000000
--- a/meta/recipes-devtools/python-numpy/files/arm/numpyconfig.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* cat ./src.linux-i686-2.5/numpy/core/include/numpy/numpyconfig.h */
-/*
- * * This file is generated by numpy/core/setup.pyc. DO NOT EDIT
- * */
-#define NPY_SIZEOF_SHORT 2
-#define NPY_SIZEOF_INT 4
-#define NPY_SIZEOF_LONG 4
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_LONGDOUBLE 12
-#define NPY_SIZEOF_PY_INTPTR_T 4
-#define NPY_NO_SMP 0
-
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-/* #define CHAR_BIT 8 */
-
diff --git a/meta/recipes-devtools/python-numpy/files/armeb/config.h b/meta/recipes-devtools/python-numpy/files/armeb/config.h
deleted file mode 100644
index 17ef186d56..0000000000
--- a/meta/recipes-devtools/python-numpy/files/armeb/config.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* ./src.linux-i686-2.5/numpy/core/include/numpy/config.h */
-/* #define SIZEOF_SHORT 2 */
-/* #define SIZEOF_INT 4 */
-/* #define SIZEOF_LONG 4 */
-/* #define SIZEOF_FLOAT 4 */
-/* #define SIZEOF_DOUBLE 8 */
-#define SIZEOF_LONG_DOUBLE 12
-#define SIZEOF_PY_INTPTR_T 4
-/* #define SIZEOF_LONG_LONG 8 */
-#define SIZEOF_PY_LONG_LONG 8
-/* #define CHAR_BIT 8 */
-#define MATHLIB m
-#define HAVE_FLOAT_FUNCS
-#define HAVE_LOG1P
-#define HAVE_EXPM1
-#define HAVE_INVERSE_HYPERBOLIC
-#define HAVE_INVERSE_HYPERBOLIC_FLOAT
-#define HAVE_ISNAN
-#define HAVE_ISINF
-#define HAVE_RINT
-
diff --git a/meta/recipes-devtools/python-numpy/files/armeb/numpyconfig.h b/meta/recipes-devtools/python-numpy/files/armeb/numpyconfig.h
deleted file mode 100644
index c4bf6547f0..0000000000
--- a/meta/recipes-devtools/python-numpy/files/armeb/numpyconfig.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* cat ./src.linux-i686-2.5/numpy/core/include/numpy/numpyconfig.h */
-/*
- * * This file is generated by numpy/core/setup.pyc. DO NOT EDIT
- * */
-#define NPY_SIZEOF_SHORT 2
-#define NPY_SIZEOF_INT 4
-#define NPY_SIZEOF_LONG 4
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_LONGDOUBLE 12
-#define NPY_SIZEOF_PY_INTPTR_T 4
-#define NPY_NO_SMP 0
-
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-/* #define CHAR_BIT 8 */
-
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/_numpyconfig.h
deleted file mode 100644
index debb390094..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/config.h b/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/config.h
deleted file mode 100644
index c30b868f2f..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn32eb/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn32el/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarchn32el/_numpyconfig.h
deleted file mode 100644
index 8e2b5d0940..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn32el/_numpyconfig.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn32el/config.h b/meta/recipes-devtools/python-numpy/files/mipsarchn32el/config.h
deleted file mode 100644
index 48727039ae..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn32el/config.h
+++ /dev/null
@@ -1,138 +0,0 @@
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/_numpyconfig.h
deleted file mode 100644
index debb390094..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/config.h b/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/config.h
deleted file mode 100644
index c30b868f2f..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn64eb/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn64el/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarchn64el/_numpyconfig.h
deleted file mode 100644
index debb390094..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn64el/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarchn64el/config.h b/meta/recipes-devtools/python-numpy/files/mipsarchn64el/config.h
deleted file mode 100644
index 48727039ae..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarchn64el/config.h
+++ /dev/null
@@ -1,138 +0,0 @@
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/_numpyconfig.h
deleted file mode 100644
index 4c465c216c..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 8
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 4
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/config.h b/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/config.h
deleted file mode 100644
index 2f6135adce..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarcho32eb/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 4
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_DOUBLE_BE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarcho32el/config.h b/meta/recipes-devtools/python-numpy/files/mipsarcho32el/config.h
deleted file mode 100644
index 17ef186d56..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarcho32el/config.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* ./src.linux-i686-2.5/numpy/core/include/numpy/config.h */
-/* #define SIZEOF_SHORT 2 */
-/* #define SIZEOF_INT 4 */
-/* #define SIZEOF_LONG 4 */
-/* #define SIZEOF_FLOAT 4 */
-/* #define SIZEOF_DOUBLE 8 */
-#define SIZEOF_LONG_DOUBLE 12
-#define SIZEOF_PY_INTPTR_T 4
-/* #define SIZEOF_LONG_LONG 8 */
-#define SIZEOF_PY_LONG_LONG 8
-/* #define CHAR_BIT 8 */
-#define MATHLIB m
-#define HAVE_FLOAT_FUNCS
-#define HAVE_LOG1P
-#define HAVE_EXPM1
-#define HAVE_INVERSE_HYPERBOLIC
-#define HAVE_INVERSE_HYPERBOLIC_FLOAT
-#define HAVE_ISNAN
-#define HAVE_ISINF
-#define HAVE_RINT
-
diff --git a/meta/recipes-devtools/python-numpy/files/mipsarcho32el/numpyconfig.h b/meta/recipes-devtools/python-numpy/files/mipsarcho32el/numpyconfig.h
deleted file mode 100644
index 0b7cd51af4..0000000000
--- a/meta/recipes-devtools/python-numpy/files/mipsarcho32el/numpyconfig.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* cat ./src.linux-i686-2.5/numpy/core/include/numpy/numpyconfig.h */
-/*
- * * This file is generated by numpy/core/setup.pyc. DO NOT EDIT
- * */
-#define NPY_SIZEOF_SHORT 2
-#define NPY_SIZEOF_INT 4
-#define NPY_SIZEOF_LONG 4
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_LONGDOUBLE 12
-#define NPY_SIZEOF_PY_INTPTR_T 4
-#define NPY_NO_SMP 0
-
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_OFF_T 8
-/* #define CHAR_BIT 8 */
-
diff --git a/meta/recipes-devtools/python-numpy/files/powerpc/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/powerpc/_numpyconfig.h
deleted file mode 100644
index 6e7262ad91..0000000000
--- a/meta/recipes-devtools/python-numpy/files/powerpc/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 4
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/powerpc/config.h b/meta/recipes-devtools/python-numpy/files/powerpc/config.h
deleted file mode 100644
index f65d39d5de..0000000000
--- a/meta/recipes-devtools/python-numpy/files/powerpc/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 4
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_DOUBLE_DOUBLE_BE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/powerpc64/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/powerpc64/_numpyconfig.h
deleted file mode 100644
index debb390094..0000000000
--- a/meta/recipes-devtools/python-numpy/files/powerpc64/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/powerpc64/config.h b/meta/recipes-devtools/python-numpy/files/powerpc64/config.h
deleted file mode 100644
index c30b868f2f..0000000000
--- a/meta/recipes-devtools/python-numpy/files/powerpc64/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/riscv64/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/riscv64/_numpyconfig.h
deleted file mode 100644
index 109deb0435..0000000000
--- a/meta/recipes-devtools/python-numpy/files/riscv64/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/riscv64/config.h b/meta/recipes-devtools/python-numpy/files/riscv64/config.h
deleted file mode 100644
index c30b868f2f..0000000000
--- a/meta/recipes-devtools/python-numpy/files/riscv64/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_IEEE_QUAD_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/x86-64/_numpyconfig.h b/meta/recipes-devtools/python-numpy/files/x86-64/_numpyconfig.h
deleted file mode 100644
index b330361649..0000000000
--- a/meta/recipes-devtools/python-numpy/files/x86-64/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_ENABLE_SEPARATE_COMPILATION 1
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_USE_C99_FORMATS 1
-#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
-#define NPY_ABI_VERSION 0x01000009
-#define NPY_API_VERSION 0x0000000A
-
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/x86-64/config.h b/meta/recipes-devtools/python-numpy/files/x86-64/config.h
deleted file mode 100644
index 0ce63b7d22..0000000000
--- a/meta/recipes-devtools/python-numpy/files/x86-64/config.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#define HAVE_ENDIAN_H 1
-#define SIZEOF_PY_INTPTR_T 8
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN 1
-#define HAVE_COS 1
-#define HAVE_TAN 1
-#define HAVE_SINH 1
-#define HAVE_COSH 1
-#define HAVE_TANH 1
-#define HAVE_FABS 1
-#define HAVE_FLOOR 1
-#define HAVE_CEIL 1
-#define HAVE_SQRT 1
-#define HAVE_LOG10 1
-#define HAVE_LOG 1
-#define HAVE_EXP 1
-#define HAVE_ASIN 1
-#define HAVE_ACOS 1
-#define HAVE_ATAN 1
-#define HAVE_FMOD 1
-#define HAVE_MODF 1
-#define HAVE_FREXP 1
-#define HAVE_LDEXP 1
-#define HAVE_RINT 1
-#define HAVE_TRUNC 1
-#define HAVE_EXP2 1
-#define HAVE_LOG2 1
-#define HAVE_ATAN2 1
-#define HAVE_POW 1
-#define HAVE_NEXTAFTER 1
-#define HAVE_SINF 1
-#define HAVE_COSF 1
-#define HAVE_TANF 1
-#define HAVE_SINHF 1
-#define HAVE_COSHF 1
-#define HAVE_TANHF 1
-#define HAVE_FABSF 1
-#define HAVE_FLOORF 1
-#define HAVE_CEILF 1
-#define HAVE_RINTF 1
-#define HAVE_TRUNCF 1
-#define HAVE_SQRTF 1
-#define HAVE_LOG10F 1
-#define HAVE_LOGF 1
-#define HAVE_LOG1PF 1
-#define HAVE_EXPF 1
-#define HAVE_EXPM1F 1
-#define HAVE_ASINF 1
-#define HAVE_ACOSF 1
-#define HAVE_ATANF 1
-#define HAVE_ASINHF 1
-#define HAVE_ACOSHF 1
-#define HAVE_ATANHF 1
-#define HAVE_HYPOTF 1
-#define HAVE_ATAN2F 1
-#define HAVE_POWF 1
-#define HAVE_FMODF 1
-#define HAVE_MODFF 1
-#define HAVE_FREXPF 1
-#define HAVE_LDEXPF 1
-#define HAVE_EXP2F 1
-#define HAVE_LOG2F 1
-#define HAVE_COPYSIGNF 1
-#define HAVE_NEXTAFTERF 1
-#define HAVE_SINL 1
-#define HAVE_COSL 1
-#define HAVE_TANL 1
-#define HAVE_SINHL 1
-#define HAVE_COSHL 1
-#define HAVE_TANHL 1
-#define HAVE_FABSL 1
-#define HAVE_FLOORL 1
-#define HAVE_CEILL 1
-#define HAVE_RINTL 1
-#define HAVE_TRUNCL 1
-#define HAVE_SQRTL 1
-#define HAVE_LOG10L 1
-#define HAVE_LOGL 1
-#define HAVE_LOG1PL 1
-#define HAVE_EXPL 1
-#define HAVE_EXPM1L 1
-#define HAVE_ASINL 1
-#define HAVE_ACOSL 1
-#define HAVE_ATANL 1
-#define HAVE_ASINHL 1
-#define HAVE_ACOSHL 1
-#define HAVE_ATANHL 1
-#define HAVE_HYPOTL 1
-#define HAVE_ATAN2L 1
-#define HAVE_POWL 1
-#define HAVE_FMODL 1
-#define HAVE_MODFL 1
-#define HAVE_FREXPL 1
-#define HAVE_LDEXPL 1
-#define HAVE_EXP2L 1
-#define HAVE_LOG2L 1
-#define HAVE_COPYSIGNL 1
-#define HAVE_NEXTAFTERL 1
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H 1
-#define HAVE_CREAL 1
-#define HAVE_CIMAG 1
-#define HAVE_CABS 1
-#define HAVE_CARG 1
-#define HAVE_CEXP 1
-#define HAVE_CSQRT 1
-#define HAVE_CLOG 1
-#define HAVE_CCOS 1
-#define HAVE_CSIN 1
-#define HAVE_CPOW 1
-#define HAVE_CREALF 1
-#define HAVE_CIMAGF 1
-#define HAVE_CABSF 1
-#define HAVE_CARGF 1
-#define HAVE_CEXPF 1
-#define HAVE_CSQRTF 1
-#define HAVE_CLOGF 1
-#define HAVE_CCOSF 1
-#define HAVE_CSINF 1
-#define HAVE_CPOWF 1
-#define HAVE_CREALL 1
-#define HAVE_CIMAGL 1
-#define HAVE_CABSL 1
-#define HAVE_CARGL 1
-#define HAVE_CEXPL 1
-#define HAVE_CSQRTL 1
-#define HAVE_CLOGL 1
-#define HAVE_CCOSL 1
-#define HAVE_CSINL 1
-#define HAVE_CPOWL 1
-#define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/x86/config.h b/meta/recipes-devtools/python-numpy/files/x86/config.h
deleted file mode 100644
index 08e41e3d99..0000000000
--- a/meta/recipes-devtools/python-numpy/files/x86/config.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#define SIZEOF_PY_INTPTR_T 4
-#define SIZEOF_PY_LONG_LONG 8
-#define MATHLIB m
-#define HAVE_SIN
-#define HAVE_COS
-#define HAVE_TAN
-#define HAVE_SINH
-#define HAVE_COSH
-#define HAVE_TANH
-#define HAVE_FABS
-#define HAVE_FLOOR
-#define HAVE_CEIL
-#define HAVE_SQRT
-#define HAVE_LOG10
-#define HAVE_LOG
-#define HAVE_EXP
-#define HAVE_ASIN
-#define HAVE_ACOS
-#define HAVE_ATAN
-#define HAVE_FMOD
-#define HAVE_MODF
-#define HAVE_FREXP
-#define HAVE_LDEXP
-#define HAVE_RINT
-#define HAVE_TRUNC
-#define HAVE_EXP2
-#define HAVE_LOG2
-#define HAVE_ATAN2
-#define HAVE_POW
-#define HAVE_NEXTAFTER
-#define HAVE_SINF
-#define HAVE_COSF
-#define HAVE_TANF
-#define HAVE_SINHF
-#define HAVE_COSHF
-#define HAVE_TANHF
-#define HAVE_FABSF
-#define HAVE_FLOORF
-#define HAVE_CEILF
-#define HAVE_RINTF
-#define HAVE_TRUNCF
-#define HAVE_SQRTF
-#define HAVE_LOG10F
-#define HAVE_LOGF
-#define HAVE_LOG1PF
-#define HAVE_EXPF
-#define HAVE_EXPM1F
-#define HAVE_ASINF
-#define HAVE_ACOSF
-#define HAVE_ATANF
-#define HAVE_ASINHF
-#define HAVE_ACOSHF
-#define HAVE_ATANHF
-#define HAVE_HYPOTF
-#define HAVE_ATAN2F
-#define HAVE_POWF
-#define HAVE_FMODF
-#define HAVE_MODFF
-#define HAVE_FREXPF
-#define HAVE_LDEXPF
-#define HAVE_EXP2F
-#define HAVE_LOG2F
-#define HAVE_COPYSIGNF
-#define HAVE_NEXTAFTERF
-#define HAVE_SINL
-#define HAVE_COSL
-#define HAVE_TANL
-#define HAVE_SINHL
-#define HAVE_COSHL
-#define HAVE_TANHL
-#define HAVE_FABSL
-#define HAVE_FLOORL
-#define HAVE_CEILL
-#define HAVE_RINTL
-#define HAVE_TRUNCL
-#define HAVE_SQRTL
-#define HAVE_LOG10L
-#define HAVE_LOGL
-#define HAVE_LOG1PL
-#define HAVE_EXPL
-#define HAVE_EXPM1L
-#define HAVE_ASINL
-#define HAVE_ACOSL
-#define HAVE_ATANL
-#define HAVE_ASINHL
-#define HAVE_ACOSHL
-#define HAVE_ATANHL
-#define HAVE_HYPOTL
-#define HAVE_ATAN2L
-#define HAVE_POWL
-#define HAVE_FMODL
-#define HAVE_MODFL
-#define HAVE_FREXPL
-#define HAVE_LDEXPL
-#define HAVE_EXP2L
-#define HAVE_LOG2L
-#define HAVE_COPYSIGNL
-#define HAVE_NEXTAFTERL
-#define HAVE_DECL_SIGNBIT
-#define HAVE_COMPLEX_H
-#define HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE 1
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
diff --git a/meta/recipes-devtools/python-numpy/files/x86/numpyconfig.h b/meta/recipes-devtools/python-numpy/files/x86/numpyconfig.h
deleted file mode 100644
index ff7938cd96..0000000000
--- a/meta/recipes-devtools/python-numpy/files/x86/numpyconfig.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _NPY_NUMPYCONFIG_H_
-#define _NPY_NUMPYCONFIG_H_
-
-#include "_numpyconfig.h"
-
-/*
- * On Mac OS X, because there is only one configuration stage for all the archs
- * in universal builds, any macro which depends on the arch needs to be
- * harcoded
- */
-#ifdef __APPLE__
- #undef NPY_SIZEOF_LONG
- #undef NPY_SIZEOF_PY_INTPTR_T
-
- #ifdef __LP64__
- #define NPY_SIZEOF_LONG 8
- #define NPY_SIZEOF_PY_INTPTR_T 8
- #else
- #define NPY_SIZEOF_LONG 4
- #define NPY_SIZEOF_PY_INTPTR_T 4
- #endif
-#endif
-
-#endif
diff --git a/meta/recipes-devtools/python-numpy/python-numpy.inc b/meta/recipes-devtools/python-numpy/python-numpy.inc
index a12e72f964..f68b90e6b9 100644
--- a/meta/recipes-devtools/python-numpy/python-numpy.inc
+++ b/meta/recipes-devtools/python-numpy/python-numpy.inc
@@ -8,7 +8,6 @@ SRCNAME = "numpy"
SRC_URI = "https://github.com/${SRCNAME}/${SRCNAME}/releases/download/v${PV}/${SRCNAME}-${PV}.tar.gz \
file://0001-Don-t-search-usr-and-so-on-for-libraries-by-default-.patch \
file://0001-npy_cpu-Add-riscv-support.patch \
- ${CONFIGFILESURI} \
file://0001-numpy-random-setup.py-remove-the-detection-of-x86-ta.patch \
"
SRC_URI[md5sum] = "c48b2ad785f82cdfe28c907ce35e2a71"
@@ -20,77 +19,10 @@ UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)\.tar"
# Needed for building with gcc 4.x from centos 7
CFLAGS_append_class-native = " -std=c99"
-CONFIGFILESURI ?= ""
-
-CONFIGFILESURI_aarch64 = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_arm = " \
- file://config.h \
- file://numpyconfig.h \
-"
-CONFIGFILESURI_armeb = " \
- file://config.h \
- file://numpyconfig.h \
-"
-CONFIGFILESURI_mipsarcho32el = " \
- file://config.h \
- file://numpyconfig.h \
-"
-CONFIGFILESURI_x86 = " \
- file://config.h \
- file://numpyconfig.h \
-"
-CONFIGFILESURI_x86-64 = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_mipsarcho32eb = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_powerpc = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_powerpc64 = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_mipsarchn64eb = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_mipsarchn64el = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_mipsarchn32eb = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_mipsarchn32el = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-CONFIGFILESURI_riscv64 = " \
- file://config.h \
- file://_numpyconfig.h \
-"
-
S = "${WORKDIR}/numpy-${PV}"
CLEANBROKEN = "1"
-# Make the build fail and replace *config.h with proper one
-# This is a ugly, ugly hack - Koen
-do_compile_prepend_class-target() {
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- true
- cp ${WORKDIR}/*config.h ${S}/build/$(ls ${S}/build | grep src)/numpy/core/include/numpy/
-}
-
FILES_${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/numpy/core/lib/*.a"
# install what is needed for numpy.test()
diff --git a/meta/recipes-devtools/python/python-native_2.7.17.bb b/meta/recipes-devtools/python/python-native_2.7.18.bb
index 335318bab8..335318bab8 100644
--- a/meta/recipes-devtools/python/python-native_2.7.17.bb
+++ b/meta/recipes-devtools/python/python-native_2.7.18.bb
diff --git a/meta/recipes-devtools/python/python.inc b/meta/recipes-devtools/python/python.inc
index 19a2f3e743..fe281586fc 100644
--- a/meta/recipes-devtools/python/python.inc
+++ b/meta/recipes-devtools/python/python.inc
@@ -5,13 +5,13 @@ SECTION = "devel/python"
# bump this on every change in contrib/python/generate-manifest-2.7.py
INC_PR = "r1"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=e466242989bd33c1bd2b6a526a742498"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
"
-SRC_URI[md5sum] = "b3b6d2c92f42a60667814358ab9f0cfd"
-SRC_URI[sha256sum] = "4d43f033cdbd0aa7b7023c81b0e986fd11e653b5248dac9144d508f11812ba41"
+SRC_URI[md5sum] = "fd6cc8ec0a78c44036f825e739f36e5a"
+SRC_URI[sha256sum] = "b62c0e7937551d0cc02b8fd5cb0f544f9405bafc9a54d3808ed4594812edef43"
# python recipe is actually python 2.x
# also, exclude pre-releases for both python 2.x and 3.x
diff --git a/meta/recipes-devtools/python/python/python2-manifest.json b/meta/recipes-devtools/python/python/python2-manifest.json
index eb52e862ab..fd98774d00 100644
--- a/meta/recipes-devtools/python/python/python2-manifest.json
+++ b/meta/recipes-devtools/python/python/python2-manifest.json
@@ -267,6 +267,7 @@
"${libdir}/python2.7/lib-dynload/xreadlines.so",
"${libdir}/python2.7/linecache.py",
"${libdir}/python2.7/new.py",
+ "${libdir}/python2.7/ntpath.py",
"${libdir}/python2.7/os.py",
"${libdir}/python2.7/platform.py",
"${libdir}/python2.7/posixpath.py",
diff --git a/meta/recipes-devtools/python/python3-testtools/no_traceback2.patch b/meta/recipes-devtools/python/python3-testtools/no_traceback2.patch
new file mode 100644
index 0000000000..594510342b
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-testtools/no_traceback2.patch
@@ -0,0 +1,23 @@
+traceback2 adds traceback for python2. Rather than depend on traceback2, we're
+python3 only so just use traceback.
+This caused breakage in oe-selftest -j which uses testtools on the autobuilder
+using buildtools-tarball.
+
+Upstream-Status: Inappropriate [Our recipe is python3 specific]
+(Once py2 is EOL upstream probably could/should take this)
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: testtools-2.3.0/testtools/content.py
+===================================================================
+--- testtools-2.3.0.orig/testtools/content.py
++++ testtools-2.3.0/testtools/content.py
+@@ -19,8 +19,7 @@ import os
+ import sys
+
+ from extras import try_import
+-# To let setup.py work, make this a conditional import.
+-traceback = try_import('traceback2')
++import traceback
+
+ from testtools.compat import (
+ _b,
diff --git a/meta/recipes-devtools/python/python3-testtools_2.3.0.bb b/meta/recipes-devtools/python/python3-testtools_2.3.0.bb
index 896ecee65c..a254b90a75 100644
--- a/meta/recipes-devtools/python/python3-testtools_2.3.0.bb
+++ b/meta/recipes-devtools/python/python3-testtools_2.3.0.bb
@@ -1,2 +1,4 @@
inherit setuptools3
require python-testtools.inc
+
+SRC_URI += "file://no_traceback2.patch"
diff --git a/meta/recipes-devtools/python/python3/0001-Don-t-search-system-for-headers-libraries.patch b/meta/recipes-devtools/python/python3/0001-Don-t-search-system-for-headers-libraries.patch
new file mode 100644
index 0000000000..acf8e1e9b5
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/0001-Don-t-search-system-for-headers-libraries.patch
@@ -0,0 +1,29 @@
+From 85e8f86ad2b7dec0848cd55b8e810a5e2722b20a Mon Sep 17 00:00:00 2001
+From: Jeremy Puhlman <jpuhlman@mvista.com>
+Date: Wed, 4 Mar 2020 00:06:42 +0000
+Subject: [PATCH] Don't search system for headers/libraries
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
+---
+ setup.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index 9da1b3a..59782c0 100644
+--- a/setup.py
++++ b/setup.py
+@@ -674,8 +674,8 @@ class PyBuildExt(build_ext):
+ add_dir_to_list(self.compiler.include_dirs,
+ sysconfig.get_config_var("INCLUDEDIR"))
+
+- system_lib_dirs = ['/lib64', '/usr/lib64', '/lib', '/usr/lib']
+- system_include_dirs = ['/usr/include']
++ system_lib_dirs = []
++ system_include_dirs = []
+ # lib_dirs and inc_dirs are used to search for files;
+ # if a file is found in one of those directories, it can
+ # be assumed that no additional -I,-L directives are needed.
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch b/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch
new file mode 100644
index 0000000000..c15295c034
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/0017-setup.py-do-not-report-missing-dependencies-for-disa.patch
@@ -0,0 +1,31 @@
+From e3b59cb9658e1d3efa3535840939a0fa92a70a5a Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 7 Oct 2019 13:22:14 +0200
+Subject: [PATCH] setup.py: do not report missing dependencies for disabled
+ modules
+
+Reporting those missing dependencies is misleading as the modules would not
+have been built anyway. This particularly matters in oe-core's automated
+build completeness checker which relies on the report.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ setup.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/setup.py b/setup.py
+index 4b53668..0097643 100644
+--- a/setup.py
++++ b/setup.py
+@@ -365,6 +365,10 @@ class PyBuildExt(build_ext):
+ print("%-*s %-*s %-*s" % (longest, e, longest, f,
+ longest, g))
+
++ # There is no need to report missing module dependencies,
++ # if the modules have been disabled in the first place.
++ missing = list(set(missing) - set(sysconf_dis))
++
+ if missing:
+ print()
+ print("Python build finished successfully!")
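
For clarity, the effect of the filtering added above can be sketched with a tiny standalone example (the module names below are purely illustrative, not taken from any build output):

    # missing: extensions that failed to build; sysconf_dis: extensions
    # deliberately disabled via Modules/Setup -- only the difference is reported.
    missing = ['_gdbm', '_uuid', '_tkinter']
    sysconf_dis = ['_gdbm', '_tkinter']
    missing = list(set(missing) - set(sysconf_dis))
    print(missing)  # ['_uuid'] -- disabled modules no longer show up as missing
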
diff --git a/meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch b/meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch
index 0bafec73c0..d49604ba4d 100644
--- a/meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch
+++ b/meta/recipes-devtools/python/python3/12-distutils-prefix-is-inside-staging-area.patch
@@ -1,4 +1,4 @@
-From 6229502e5ae6cbb22240594f002638e9ef78f831 Mon Sep 17 00:00:00 2001
+From a274ba778838824efcacaba57c415b7262f779ec Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Tue, 14 May 2013 15:00:26 -0700
Subject: [PATCH] python3: Add target and native recipes
diff --git a/meta/recipes-devtools/python/python3/CVE-2020-14422.patch b/meta/recipes-devtools/python/python3/CVE-2020-14422.patch
new file mode 100644
index 0000000000..31ad82d7c5
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/CVE-2020-14422.patch
@@ -0,0 +1,79 @@
+From b98e7790c77a4378ec4b1c71b84138cb930b69b7 Mon Sep 17 00:00:00 2001
+From: Tapas Kundu <39723251+tapakund@users.noreply.github.com>
+Date: Wed, 1 Jul 2020 00:50:21 +0530
+Subject: [PATCH] [3.7] bpo-41004: Resolve hash collisions for IPv4Interface
+ and IPv6Interface (GH-21033) (GH-21231)
+
+CVE-2020-14422
+The __hash__() methods of classes IPv4Interface and IPv6Interface had issue
+of generating constant hash values of 32 and 128 respectively causing hash collisions.
+The fix uses the hash() function to generate hash values for the objects
+instead of XOR operation
+(cherry picked from commit b30ee26e366bf509b7538d79bfec6c6d38d53f28)
+
+Co-authored-by: Ravi Teja P <rvteja92@gmail.com>
+
+Signed-off-by: Tapas Kundu <tkundu@vmware.com>
+
+Upstream-Status: Backport [https://github.com/python/cpython/commit/b98e7790c77a4378ec4b1c71b84138cb930b69b7]
+CVE: CVE-2020-14422
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ Lib/ipaddress.py | 4 ++--
+ Lib/test/test_ipaddress.py | 11 +++++++++++
+ .../Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst | 1 +
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
+
+diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
+index 80249288d73ab..54882934c3dc1 100644
+--- a/Lib/ipaddress.py
++++ b/Lib/ipaddress.py
+@@ -1442,7 +1442,7 @@ def __lt__(self, other):
+ return False
+
+ def __hash__(self):
+- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
++ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+@@ -2088,7 +2088,7 @@ def __lt__(self, other):
+ return False
+
+ def __hash__(self):
+- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
++ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
+index 455b893fb126f..1fb6a929dc2d9 100644
+--- a/Lib/test/test_ipaddress.py
++++ b/Lib/test/test_ipaddress.py
+@@ -2091,6 +2091,17 @@ def testsixtofour(self):
+ sixtofouraddr.sixtofour)
+ self.assertFalse(bad_addr.sixtofour)
+
++ # issue41004 Hash collisions in IPv4Interface and IPv6Interface
++ def testV4HashIsNotConstant(self):
++ ipv4_address1 = ipaddress.IPv4Interface("1.2.3.4")
++ ipv4_address2 = ipaddress.IPv4Interface("2.3.4.5")
++ self.assertNotEqual(ipv4_address1.__hash__(), ipv4_address2.__hash__())
++
++ # issue41004 Hash collisions in IPv4Interface and IPv6Interface
++ def testV6HashIsNotConstant(self):
++ ipv6_address1 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:1")
++ ipv6_address2 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:2")
++ self.assertNotEqual(ipv6_address1.__hash__(), ipv6_address2.__hash__())
+
+ if __name__ == '__main__':
+ unittest.main()
+diff --git a/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst b/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
+new file mode 100644
+index 0000000000000..f5a9db52fff52
+--- /dev/null
++++ b/Misc/NEWS.d/next/Security/2020-06-29-16-02-29.bpo-41004.ovF0KZ.rst
+@@ -0,0 +1 @@
++CVE-2020-14422: The __hash__() methods of ipaddress.IPv4Interface and ipaddress.IPv6Interface incorrectly generated constant hash values of 32 and 128 respectively. This resulted in always causing hash collisions. The fix uses hash() to generate hash values for the tuple of (address, mask length, network address).
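
To see the collision this patch addresses, the following standalone sketch reproduces the old XOR-based hash alongside the new tuple-based one using only public ipaddress attributes (illustrative code, not part of the backport):

    import ipaddress

    a = ipaddress.IPv4Interface("1.2.3.4")   # implicit /32
    b = ipaddress.IPv4Interface("2.3.4.5")   # implicit /32

    def old_hash(iface):
        # pre-fix: address XOR prefix length XOR network address
        return int(iface) ^ iface.network.prefixlen ^ int(iface.network.network_address)

    def new_hash(iface):
        # post-fix: hash the tuple instead of XOR-ing its members
        return hash((int(iface), iface.network.prefixlen, int(iface.network.network_address)))

    print(old_hash(a), old_hash(b))      # 32 32 -> every /32 interface collided
    print(new_hash(a) == new_hash(b))    # False
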
diff --git a/meta/recipes-devtools/python/python3/CVE-2020-26116.patch b/meta/recipes-devtools/python/python3/CVE-2020-26116.patch
new file mode 100644
index 0000000000..2820999063
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/CVE-2020-26116.patch
@@ -0,0 +1,106 @@
+From ca75fec1ed358f7324272608ca952b2d8226d11a Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-islington@users.noreply.github.com>
+Date: Sun, 19 Jul 2020 02:27:35 -0700
+Subject: [PATCH] bpo-39603: Prevent header injection in http methods
+ (GH-18485) (GH-21538)
+
+reject control chars in http method in http.client.putrequest to prevent http header injection
+(cherry picked from commit 8ca8a2e8fb068863c1138f07e3098478ef8be12e)
+
+Co-authored-by: AMIR <31338382+amiremohamadi@users.noreply.github.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-26116
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ Lib/http/client.py | 15 +++++++++++++
+ Lib/test/test_httplib.py | 22 +++++++++++++++++++
+ .../2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst | 2 ++
+ 3 files changed, 39 insertions(+)
+ create mode 100644 Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
+
+diff --git a/Lib/http/client.py b/Lib/http/client.py
+index 09c57af865..04cd8f7d84 100644
+--- a/Lib/http/client.py
++++ b/Lib/http/client.py
+@@ -150,6 +150,10 @@ _contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
+ # _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
+ # We are more lenient for assumed real world compatibility purposes.
+
++# These characters are not allowed within HTTP method names
++# to prevent http header injection.
++_contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]')
++
+ # We always set the Content-Length header for these methods because some
+ # servers will otherwise respond with a 411
+ _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
+@@ -1109,6 +1113,8 @@ class HTTPConnection:
+ else:
+ raise CannotSendRequest(self.__state)
+
++ self._validate_method(method)
++
+ # Save the method for use later in the response phase
+ self._method = method
+
+@@ -1199,6 +1205,15 @@ class HTTPConnection:
+ # ASCII also helps prevent CVE-2019-9740.
+ return request.encode('ascii')
+
++ def _validate_method(self, method):
++ """Validate a method name for putrequest."""
++ # prevent http header injection
++ match = _contains_disallowed_method_pchar_re.search(method)
++ if match:
++ raise ValueError(
++ f"method can't contain control characters. {method!r} "
++ f"(found at least {match.group()!r})")
++
+ def _validate_path(self, url):
+ """Validate a url for putrequest."""
+ # Prevent CVE-2019-9740.
+diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
+index 891393ab86..3fa0691d3a 100644
+--- a/Lib/test/test_httplib.py
++++ b/Lib/test/test_httplib.py
+@@ -363,6 +363,28 @@ class HeaderTests(TestCase):
+ self.assertEqual(lines[3], "header: Second: val2")
+
+
++class HttpMethodTests(TestCase):
++ def test_invalid_method_names(self):
++ methods = (
++ 'GET\r',
++ 'POST\n',
++ 'PUT\n\r',
++ 'POST\nValue',
++ 'POST\nHOST:abc',
++ 'GET\nrHost:abc\n',
++ 'POST\rRemainder:\r',
++ 'GET\rHOST:\n',
++ '\nPUT'
++ )
++
++ for method in methods:
++ with self.assertRaisesRegex(
++ ValueError, "method can't contain control characters"):
++ conn = client.HTTPConnection('example.com')
++ conn.sock = FakeSocket(None)
++ conn.request(method=method, url="/")
++
++
+ class TransferEncodingTest(TestCase):
+ expected_body = b"It's just a flesh wound"
+
+diff --git a/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst b/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
+new file mode 100644
+index 0000000000..990affc3ed
+--- /dev/null
++++ b/Misc/NEWS.d/next/Security/2020-02-12-14-17-39.bpo-39603.Gt3RSg.rst
+@@ -0,0 +1,2 @@
++Prevent http header injection by rejecting control characters in
++http.client.putrequest(...).
+--
+2.17.1
+
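The check added above boils down to refusing any method string that contains an ASCII control character before it is spliced into the request line. A standalone C sketch of the same screen (illustration only, not part of any patch in this series; the function names are made up):

#include <stdbool.h>
#include <stdio.h>

/* Return false if the method contains an ASCII control character
 * (0x00-0x1f), i.e. anything that could terminate the request line
 * early and smuggle in extra header lines. */
static bool method_is_clean(const char *method)
{
    for (const unsigned char *p = (const unsigned char *)method; *p; p++) {
        if (*p < 0x20) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    printf("%d %d\n", method_is_clean("GET"),
           method_is_clean("GET\r\nHost: evil"));   /* prints "1 0" */
    return 0;
}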
diff --git a/meta/recipes-devtools/python/python3_3.7.6.bb b/meta/recipes-devtools/python/python3_3.7.8.bb
index b33b7028d4..cd4bee5a88 100644
--- a/meta/recipes-devtools/python/python3_3.7.6.bb
+++ b/meta/recipes-devtools/python/python3_3.7.8.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "http://www.python.org"
LICENSE = "PSFv2"
SECTION = "devel/python"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=e466242989bd33c1bd2b6a526a742498"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://run-ptest \
@@ -28,18 +28,22 @@ SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://reformat_sysconfig.py \
file://0001-Use-FLAG_REF-always-for-interned-strings.patch \
file://0001-test_locale.py-correct-the-test-output-format.patch \
+ file://0017-setup.py-do-not-report-missing-dependencies-for-disa.patch \
+ file://CVE-2020-14422.patch \
+ file://CVE-2020-26116.patch \
"
SRC_URI_append_class-native = " \
file://0001-distutils-sysconfig-append-STAGING_LIBDIR-python-sys.patch \
file://12-distutils-prefix-is-inside-staging-area.patch \
+ file://0001-Don-t-search-system-for-headers-libraries.patch \
"
SRC_URI_append_class-nativesdk = " \
file://0001-main.c-if-OEPYTHON3HOME-is-set-use-instead-of-PYTHON.patch \
"
-SRC_URI[md5sum] = "c08fbee72ad5c2c95b0f4e44bf6fd72c"
-SRC_URI[sha256sum] = "55a2cce72049f0794e9a11a84862e9039af9183603b78bc60d89539f82cf533f"
+SRC_URI[md5sum] = "a224ef2249a18824f48fba9812f4006f"
+SRC_URI[sha256sum] = "43a543404b363f0037f89df8478f19db2dbc0d6f3ffee310bc2997fa71854a63"
# exclude pre-releases for both python 2.x and 3.x
UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>\d+(\.\d+)+).tar"
@@ -65,7 +69,7 @@ ALTERNATIVE_LINK_NAME[python-config] = "${bindir}/python${PYTHON_BINABI}-config"
ALTERNATIVE_TARGET[python-config] = "${bindir}/python${PYTHON_BINABI}-config-${MULTILIB_SUFFIX}"
-DEPENDS = "bzip2-replacement-native libffi bzip2 gdbm openssl sqlite3 zlib virtual/libintl xz virtual/crypt util-linux libtirpc libnsl2"
+DEPENDS = "bzip2-replacement-native libffi bzip2 openssl sqlite3 zlib virtual/libintl xz virtual/crypt util-linux libtirpc libnsl2"
DEPENDS_append_class-target = " python3-native"
DEPENDS_append_class-nativesdk = " python3-native"
@@ -90,13 +94,23 @@ python() {
d.setVar('PACKAGECONFIG_PGO', '')
}
-PACKAGECONFIG_class-target ??= "readline ${PACKAGECONFIG_PGO}"
-PACKAGECONFIG_class-native ??= "readline"
-PACKAGECONFIG_class-nativesdk ??= "readline"
+PACKAGECONFIG_class-target ??= "readline ${PACKAGECONFIG_PGO} gdbm"
+PACKAGECONFIG_class-native ??= "readline gdbm"
+PACKAGECONFIG_class-nativesdk ??= "readline gdbm"
PACKAGECONFIG[readline] = ",,readline"
# Use profile guided optimisation by running PyBench inside qemu-user
PACKAGECONFIG[pgo] = "--enable-optimizations,,qemu-native"
PACKAGECONFIG[tk] = ",,tk"
+PACKAGECONFIG[gdbm] = ",,gdbm"
+
+do_configure_prepend () {
+ mkdir -p ${B}/Modules
+ cat > ${B}/Modules/Setup.local << EOF
+*disabled*
+${@bb.utils.contains('PACKAGECONFIG', 'gdbm', '', '_gdbm _dbm', d)}
+${@bb.utils.contains('PACKAGECONFIG', 'readline', '', 'readline', d)}
+EOF
+}
CPPFLAGS_append = " -I${STAGING_INCDIR}/ncursesw -I${STAGING_INCDIR}/uuid"
diff --git a/meta/recipes-devtools/python/python_2.7.17.bb b/meta/recipes-devtools/python/python_2.7.18.bb
index 5b856a5097..5b856a5097 100644
--- a/meta/recipes-devtools/python/python_2.7.17.bb
+++ b/meta/recipes-devtools/python/python_2.7.18.bb
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index bb444b63d9..cbade92ac9 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -27,9 +27,24 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
file://0008-linux-user-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch \
file://0009-Fix-webkitgtk-builds.patch \
file://0010-configure-Add-pkg-config-handling-for-libgcrypt.patch \
+ file://0011-linux-user-remove-host-stime-syscall.patch \
file://CVE-2019-15890.patch \
file://CVE-2019-12068.patch \
- "
+ file://CVE-2020-1711.patch \
+ file://CVE-2019-20382.patch \
+ file://CVE-2020-7039-1.patch \
+ file://CVE-2020-7039-2.patch \
+ file://CVE-2020-7039-3.patch \
+ file://CVE-2020-7211.patch \
+ file://CVE-2020-11869.patch \
+ file://CVE-2020-13765.patch \
+ file://CVE-2020-10702.patch \
+ file://CVE-2020-16092.patch \
+ file://CVE-2020-10756.patch \
+ file://CVE-2020-15863.patch \
+ file://CVE-2020-14364.patch \
+ file://CVE-2020-12829.patch \
+ "
UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
SRC_URI[md5sum] = "cdf2b5ca52b9abac9bacb5842fa420f8"
@@ -164,6 +179,7 @@ PACKAGECONFIG[spice] = "--enable-spice,--disable-spice,spice"
# usbredir will be in meta-networking layer
PACKAGECONFIG[usb-redir] = "--enable-usb-redir,--disable-usb-redir,usbredir"
PACKAGECONFIG[snappy] = "--enable-snappy,--disable-snappy,snappy"
+PACKAGECONFIG[glusterfs] = "--enable-glusterfs,--disable-glusterfs,glusterfs"
INSANE_SKIP_${PN} = "arch"
diff --git a/meta/recipes-devtools/qemu/qemu/0011-linux-user-remove-host-stime-syscall.patch b/meta/recipes-devtools/qemu/qemu/0011-linux-user-remove-host-stime-syscall.patch
new file mode 100644
index 0000000000..659e6be45d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0011-linux-user-remove-host-stime-syscall.patch
@@ -0,0 +1,61 @@
+From 0f1f2d4596aee037d3ccbcf10592466daa54107f Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <laurent@vivier.eu>
+Date: Tue, 12 Nov 2019 15:25:56 +0100
+Subject: [PATCH] linux-user: remove host stime() syscall
+
+stime() has been withdrawn from glibc
+(12cbde1dae6f "Use clock_settime to implement stime; withdraw stime.")
+
+Implement the target stime() syscall using host
+clock_settime(CLOCK_REALTIME, ...) as it is done internally in glibc.
+
+Tested qemu-ppc/x86_64 with:
+
+ #include <time.h>
+ #include <stdio.h>
+
+ int main(void)
+ {
+ time_t t;
+ int ret;
+
+ /* date -u -d"2019-11-12T15:11:00" "+%s" */
+ t = 1573571460;
+ ret = stime(&t);
+ printf("ret %d\n", ret);
+ return 0;
+ }
+
+ # date; ./stime; date
+ Tue Nov 12 14:18:32 UTC 2019
+ ret 0
+ Tue Nov 12 15:11:00 UTC 2019
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=0f1f2d4596aee037d3ccbcf10592466daa54107f]
+Buglink: https://bugs.launchpad.net/qemu/+bug/1852115
+Reported-by: Cole Robinson <crobinso@redhat.com>
+Signed-off-by: Laurent Vivier <laurent@vivier.eu>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Message-Id: <20191112142556.6335-1-laurent@vivier.eu>
+---
+ linux-user/syscall.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/linux-user/syscall.c
++++ b/linux-user/syscall.c
+@@ -7651,10 +7651,12 @@ static abi_long do_syscall1(void *cpu_en
+ #ifdef TARGET_NR_stime /* not on alpha */
+ case TARGET_NR_stime:
+ {
+- time_t host_time;
+- if (get_user_sal(host_time, arg1))
++ struct timespec ts;
++ ts.tv_nsec = 0;
++ if (get_user_sal(ts.tv_sec, arg1)) {
+ return -TARGET_EFAULT;
+- return get_errno(stime(&host_time));
++ }
++ return get_errno(clock_settime(CLOCK_REALTIME, &ts));
+ }
+ #endif
+ #ifdef TARGET_NR_alarm /* not on alpha */
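As a reference for the hunk above, a minimal user-space sketch (an assumption about equivalent usage, not QEMU code) of setting the clock through clock_settime(CLOCK_REALTIME, ...) instead of the withdrawn stime():

#include <stdio.h>
#include <time.h>

/* Sketch only: set the wall clock the way the hunk above does, through
 * clock_settime(CLOCK_REALTIME, ...); requires CAP_SYS_TIME to succeed. */
static int set_clock(time_t seconds)
{
    struct timespec ts = { .tv_sec = seconds, .tv_nsec = 0 };
    return clock_settime(CLOCK_REALTIME, &ts);
}

int main(void)
{
    /* date -u -d"2019-11-12T15:11:00" "+%s" */
    if (set_clock(1573571460) != 0) {
        perror("clock_settime");
        return 1;
    }
    return 0;
}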
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2019-20382.patch b/meta/recipes-devtools/qemu/qemu/CVE-2019-20382.patch
new file mode 100644
index 0000000000..183d100398
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2019-20382.patch
@@ -0,0 +1,1018 @@
+From 6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0 Mon Sep 17 00:00:00 2001
+From: Li Qiang <liq3ea@163.com>
+Date: Sat, 31 Aug 2019 08:39:22 -0700
+Subject: [PATCH] vnc: fix memory leak when vnc disconnect
+
+Currently when qemu receives a vnc connect, it creates a 'VncState' to
+represent this connection. In 'vnc_worker_thread_loop' it creates a
+local 'VncState'. The connection 'VcnState' and local 'VncState' exchange
+data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'.
+In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library
+opaque data. The 'VncState' used in 'zrle_compress_data' is the local
+'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz
+library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection
+'VncState'. In the current implementation there will be a memory leak when the
+vnc client disconnects. Following is the asan output backtrace:
+
+Direct leak of 29760 byte(s) in 5 object(s) allocated from:
+ 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3)
+ 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb)
+ 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7)
+ 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87
+ 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344
+ 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919
+ 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271
+ 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340
+ 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502
+ 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb)
+ 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb)
+
+This is because the opaque allocated in 'deflateInit2' is not freed in
+'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck',
+which checks whether 's->strm != strm' (libz's data structure).
+This check will be true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does
+not free the data allocated in 'deflateInit2'.
+
+The reason this happens is that the 'VncState' contains the whole 'VncZrle',
+so when calling 'deflateInit2', the 's->strm' will be the local address.
+So 's->strm != strm' will be true.
+
+To fix this issue, we need to make 'zrle' of 'VncState' to be a pointer.
+Then the connection 'VncState' and local 'VncState' exchange mechanism will
+work as expected. The 'tight' member of 'VncState' has the same issue, so let's
+also turn it into a pointer.
+
+Reported-by: Ying Fang <fangying1@huawei.com>
+Signed-off-by: Li Qiang <liq3ea@163.com>
+Message-id: 20190831153922.121308-1-liq3ea@163.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0]
+CVE: CVE-2019-20382
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ ui/vnc-enc-tight.c | 219 +++++++++++++++++++++++++-------------------------
+ ui/vnc-enc-zlib.c | 11 +--
+ ui/vnc-enc-zrle.c | 68 ++++++++--------
+ ui/vnc-enc-zrle.inc.c | 2 +-
+ ui/vnc.c | 28 ++++---
+ ui/vnc.h | 4 +-
+ 6 files changed, 170 insertions(+), 162 deletions(-)
+
+diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c
+index 9084c22..1e08518 100644
+--- a/ui/vnc-enc-tight.c
++++ b/ui/vnc-enc-tight.c
+@@ -116,7 +116,7 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h,
+
+ static bool tight_can_send_png_rect(VncState *vs, int w, int h)
+ {
+- if (vs->tight.type != VNC_ENCODING_TIGHT_PNG) {
++ if (vs->tight->type != VNC_ENCODING_TIGHT_PNG) {
+ return false;
+ }
+
+@@ -144,7 +144,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h)
+ int pixels = 0;
+ int pix, left[3];
+ unsigned int errors;
+- unsigned char *buf = vs->tight.tight.buffer;
++ unsigned char *buf = vs->tight->tight.buffer;
+
+ /*
+ * If client is big-endian, color samples begin from the second
+@@ -215,7 +215,7 @@ tight_detect_smooth_image24(VncState *vs, int w, int h)
+ int pixels = 0; \
+ int sample, sum, left[3]; \
+ unsigned int errors; \
+- unsigned char *buf = vs->tight.tight.buffer; \
++ unsigned char *buf = vs->tight->tight.buffer; \
+ \
+ endian = 0; /* FIXME */ \
+ \
+@@ -296,8 +296,8 @@ static int
+ tight_detect_smooth_image(VncState *vs, int w, int h)
+ {
+ unsigned int errors;
+- int compression = vs->tight.compression;
+- int quality = vs->tight.quality;
++ int compression = vs->tight->compression;
++ int quality = vs->tight->quality;
+
+ if (!vs->vd->lossy) {
+ return 0;
+@@ -309,7 +309,7 @@ tight_detect_smooth_image(VncState *vs, int w, int h)
+ return 0;
+ }
+
+- if (vs->tight.quality != (uint8_t)-1) {
++ if (vs->tight->quality != (uint8_t)-1) {
+ if (w * h < VNC_TIGHT_JPEG_MIN_RECT_SIZE) {
+ return 0;
+ }
+@@ -320,9 +320,9 @@ tight_detect_smooth_image(VncState *vs, int w, int h)
+ }
+
+ if (vs->client_pf.bytes_per_pixel == 4) {
+- if (vs->tight.pixel24) {
++ if (vs->tight->pixel24) {
+ errors = tight_detect_smooth_image24(vs, w, h);
+- if (vs->tight.quality != (uint8_t)-1) {
++ if (vs->tight->quality != (uint8_t)-1) {
+ return (errors < tight_conf[quality].jpeg_threshold24);
+ }
+ return (errors < tight_conf[compression].gradient_threshold24);
+@@ -352,7 +352,7 @@ tight_detect_smooth_image(VncState *vs, int w, int h)
+ uint##bpp##_t c0, c1, ci; \
+ int i, n0, n1; \
+ \
+- data = (uint##bpp##_t *)vs->tight.tight.buffer; \
++ data = (uint##bpp##_t *)vs->tight->tight.buffer; \
+ \
+ c0 = data[0]; \
+ i = 1; \
+@@ -423,9 +423,9 @@ static int tight_fill_palette(VncState *vs, int x, int y,
+ {
+ int max;
+
+- max = count / tight_conf[vs->tight.compression].idx_max_colors_divisor;
++ max = count / tight_conf[vs->tight->compression].idx_max_colors_divisor;
+ if (max < 2 &&
+- count >= tight_conf[vs->tight.compression].mono_min_rect_size) {
++ count >= tight_conf[vs->tight->compression].mono_min_rect_size) {
+ max = 2;
+ }
+ if (max >= 256) {
+@@ -558,7 +558,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
+ int x, y, c;
+
+ buf32 = (uint32_t *)buf;
+- memset(vs->tight.gradient.buffer, 0, w * 3 * sizeof(int));
++ memset(vs->tight->gradient.buffer, 0, w * 3 * sizeof(int));
+
+ if (1 /* FIXME */) {
+ shift[0] = vs->client_pf.rshift;
+@@ -575,7 +575,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
+ upper[c] = 0;
+ here[c] = 0;
+ }
+- prev = (int *)vs->tight.gradient.buffer;
++ prev = (int *)vs->tight->gradient.buffer;
+ for (x = 0; x < w; x++) {
+ pix32 = *buf32++;
+ for (c = 0; c < 3; c++) {
+@@ -615,7 +615,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
+ int prediction; \
+ int x, y, c; \
+ \
+- memset (vs->tight.gradient.buffer, 0, w * 3 * sizeof(int)); \
++ memset(vs->tight->gradient.buffer, 0, w * 3 * sizeof(int)); \
+ \
+ endian = 0; /* FIXME */ \
+ \
+@@ -631,7 +631,7 @@ tight_filter_gradient24(VncState *vs, uint8_t *buf, int w, int h)
+ upper[c] = 0; \
+ here[c] = 0; \
+ } \
+- prev = (int *)vs->tight.gradient.buffer; \
++ prev = (int *)vs->tight->gradient.buffer; \
+ for (x = 0; x < w; x++) { \
+ pix = *buf; \
+ if (endian) { \
+@@ -785,7 +785,7 @@ static void extend_solid_area(VncState *vs, int x, int y, int w, int h,
+ static int tight_init_stream(VncState *vs, int stream_id,
+ int level, int strategy)
+ {
+- z_streamp zstream = &vs->tight.stream[stream_id];
++ z_streamp zstream = &vs->tight->stream[stream_id];
+
+ if (zstream->opaque == NULL) {
+ int err;
+@@ -803,15 +803,15 @@ static int tight_init_stream(VncState *vs, int stream_id,
+ return -1;
+ }
+
+- vs->tight.levels[stream_id] = level;
++ vs->tight->levels[stream_id] = level;
+ zstream->opaque = vs;
+ }
+
+- if (vs->tight.levels[stream_id] != level) {
++ if (vs->tight->levels[stream_id] != level) {
+ if (deflateParams(zstream, level, strategy) != Z_OK) {
+ return -1;
+ }
+- vs->tight.levels[stream_id] = level;
++ vs->tight->levels[stream_id] = level;
+ }
+ return 0;
+ }
+@@ -839,11 +839,11 @@ static void tight_send_compact_size(VncState *vs, size_t len)
+ static int tight_compress_data(VncState *vs, int stream_id, size_t bytes,
+ int level, int strategy)
+ {
+- z_streamp zstream = &vs->tight.stream[stream_id];
++ z_streamp zstream = &vs->tight->stream[stream_id];
+ int previous_out;
+
+ if (bytes < VNC_TIGHT_MIN_TO_COMPRESS) {
+- vnc_write(vs, vs->tight.tight.buffer, vs->tight.tight.offset);
++ vnc_write(vs, vs->tight->tight.buffer, vs->tight->tight.offset);
+ return bytes;
+ }
+
+@@ -852,13 +852,13 @@ static int tight_compress_data(VncState *vs, int stream_id, size_t bytes,
+ }
+
+ /* reserve memory in output buffer */
+- buffer_reserve(&vs->tight.zlib, bytes + 64);
++ buffer_reserve(&vs->tight->zlib, bytes + 64);
+
+ /* set pointers */
+- zstream->next_in = vs->tight.tight.buffer;
+- zstream->avail_in = vs->tight.tight.offset;
+- zstream->next_out = vs->tight.zlib.buffer + vs->tight.zlib.offset;
+- zstream->avail_out = vs->tight.zlib.capacity - vs->tight.zlib.offset;
++ zstream->next_in = vs->tight->tight.buffer;
++ zstream->avail_in = vs->tight->tight.offset;
++ zstream->next_out = vs->tight->zlib.buffer + vs->tight->zlib.offset;
++ zstream->avail_out = vs->tight->zlib.capacity - vs->tight->zlib.offset;
+ previous_out = zstream->avail_out;
+ zstream->data_type = Z_BINARY;
+
+@@ -868,14 +868,14 @@ static int tight_compress_data(VncState *vs, int stream_id, size_t bytes,
+ return -1;
+ }
+
+- vs->tight.zlib.offset = vs->tight.zlib.capacity - zstream->avail_out;
++ vs->tight->zlib.offset = vs->tight->zlib.capacity - zstream->avail_out;
+ /* ...how much data has actually been produced by deflate() */
+ bytes = previous_out - zstream->avail_out;
+
+ tight_send_compact_size(vs, bytes);
+- vnc_write(vs, vs->tight.zlib.buffer, bytes);
++ vnc_write(vs, vs->tight->zlib.buffer, bytes);
+
+- buffer_reset(&vs->tight.zlib);
++ buffer_reset(&vs->tight->zlib);
+
+ return bytes;
+ }
+@@ -927,16 +927,17 @@ static int send_full_color_rect(VncState *vs, int x, int y, int w, int h)
+
+ vnc_write_u8(vs, stream << 4); /* no flushing, no filter */
+
+- if (vs->tight.pixel24) {
+- tight_pack24(vs, vs->tight.tight.buffer, w * h, &vs->tight.tight.offset);
++ if (vs->tight->pixel24) {
++ tight_pack24(vs, vs->tight->tight.buffer, w * h,
++ &vs->tight->tight.offset);
+ bytes = 3;
+ } else {
+ bytes = vs->client_pf.bytes_per_pixel;
+ }
+
+ bytes = tight_compress_data(vs, stream, w * h * bytes,
+- tight_conf[vs->tight.compression].raw_zlib_level,
+- Z_DEFAULT_STRATEGY);
++ tight_conf[vs->tight->compression].raw_zlib_level,
++ Z_DEFAULT_STRATEGY);
+
+ return (bytes >= 0);
+ }
+@@ -947,14 +948,14 @@ static int send_solid_rect(VncState *vs)
+
+ vnc_write_u8(vs, VNC_TIGHT_FILL << 4); /* no flushing, no filter */
+
+- if (vs->tight.pixel24) {
+- tight_pack24(vs, vs->tight.tight.buffer, 1, &vs->tight.tight.offset);
++ if (vs->tight->pixel24) {
++ tight_pack24(vs, vs->tight->tight.buffer, 1, &vs->tight->tight.offset);
+ bytes = 3;
+ } else {
+ bytes = vs->client_pf.bytes_per_pixel;
+ }
+
+- vnc_write(vs, vs->tight.tight.buffer, bytes);
++ vnc_write(vs, vs->tight->tight.buffer, bytes);
+ return 1;
+ }
+
+@@ -963,7 +964,7 @@ static int send_mono_rect(VncState *vs, int x, int y,
+ {
+ ssize_t bytes;
+ int stream = 1;
+- int level = tight_conf[vs->tight.compression].mono_zlib_level;
++ int level = tight_conf[vs->tight->compression].mono_zlib_level;
+
+ #ifdef CONFIG_VNC_PNG
+ if (tight_can_send_png_rect(vs, w, h)) {
+@@ -991,26 +992,26 @@ static int send_mono_rect(VncState *vs, int x, int y,
+ uint32_t buf[2] = {bg, fg};
+ size_t ret = sizeof (buf);
+
+- if (vs->tight.pixel24) {
++ if (vs->tight->pixel24) {
+ tight_pack24(vs, (unsigned char*)buf, 2, &ret);
+ }
+ vnc_write(vs, buf, ret);
+
+- tight_encode_mono_rect32(vs->tight.tight.buffer, w, h, bg, fg);
++ tight_encode_mono_rect32(vs->tight->tight.buffer, w, h, bg, fg);
+ break;
+ }
+ case 2:
+ vnc_write(vs, &bg, 2);
+ vnc_write(vs, &fg, 2);
+- tight_encode_mono_rect16(vs->tight.tight.buffer, w, h, bg, fg);
++ tight_encode_mono_rect16(vs->tight->tight.buffer, w, h, bg, fg);
+ break;
+ default:
+ vnc_write_u8(vs, bg);
+ vnc_write_u8(vs, fg);
+- tight_encode_mono_rect8(vs->tight.tight.buffer, w, h, bg, fg);
++ tight_encode_mono_rect8(vs->tight->tight.buffer, w, h, bg, fg);
+ break;
+ }
+- vs->tight.tight.offset = bytes;
++ vs->tight->tight.offset = bytes;
+
+ bytes = tight_compress_data(vs, stream, bytes, level, Z_DEFAULT_STRATEGY);
+ return (bytes >= 0);
+@@ -1040,7 +1041,7 @@ static void write_palette(int idx, uint32_t color, void *opaque)
+ static bool send_gradient_rect(VncState *vs, int x, int y, int w, int h)
+ {
+ int stream = 3;
+- int level = tight_conf[vs->tight.compression].gradient_zlib_level;
++ int level = tight_conf[vs->tight->compression].gradient_zlib_level;
+ ssize_t bytes;
+
+ if (vs->client_pf.bytes_per_pixel == 1) {
+@@ -1050,23 +1051,23 @@ static bool send_gradient_rect(VncState *vs, int x, int y, int w, int h)
+ vnc_write_u8(vs, (stream | VNC_TIGHT_EXPLICIT_FILTER) << 4);
+ vnc_write_u8(vs, VNC_TIGHT_FILTER_GRADIENT);
+
+- buffer_reserve(&vs->tight.gradient, w * 3 * sizeof (int));
++ buffer_reserve(&vs->tight->gradient, w * 3 * sizeof(int));
+
+- if (vs->tight.pixel24) {
+- tight_filter_gradient24(vs, vs->tight.tight.buffer, w, h);
++ if (vs->tight->pixel24) {
++ tight_filter_gradient24(vs, vs->tight->tight.buffer, w, h);
+ bytes = 3;
+ } else if (vs->client_pf.bytes_per_pixel == 4) {
+- tight_filter_gradient32(vs, (uint32_t *)vs->tight.tight.buffer, w, h);
++ tight_filter_gradient32(vs, (uint32_t *)vs->tight->tight.buffer, w, h);
+ bytes = 4;
+ } else {
+- tight_filter_gradient16(vs, (uint16_t *)vs->tight.tight.buffer, w, h);
++ tight_filter_gradient16(vs, (uint16_t *)vs->tight->tight.buffer, w, h);
+ bytes = 2;
+ }
+
+- buffer_reset(&vs->tight.gradient);
++ buffer_reset(&vs->tight->gradient);
+
+ bytes = w * h * bytes;
+- vs->tight.tight.offset = bytes;
++ vs->tight->tight.offset = bytes;
+
+ bytes = tight_compress_data(vs, stream, bytes,
+ level, Z_FILTERED);
+@@ -1077,7 +1078,7 @@ static int send_palette_rect(VncState *vs, int x, int y,
+ int w, int h, VncPalette *palette)
+ {
+ int stream = 2;
+- int level = tight_conf[vs->tight.compression].idx_zlib_level;
++ int level = tight_conf[vs->tight->compression].idx_zlib_level;
+ int colors;
+ ssize_t bytes;
+
+@@ -1104,12 +1105,12 @@ static int send_palette_rect(VncState *vs, int x, int y,
+ palette_iter(palette, write_palette, &priv);
+ vnc_write(vs, header, sizeof(header));
+
+- if (vs->tight.pixel24) {
++ if (vs->tight->pixel24) {
+ tight_pack24(vs, vs->output.buffer + old_offset, colors, &offset);
+ vs->output.offset = old_offset + offset;
+ }
+
+- tight_encode_indexed_rect32(vs->tight.tight.buffer, w * h, palette);
++ tight_encode_indexed_rect32(vs->tight->tight.buffer, w * h, palette);
+ break;
+ }
+ case 2:
+@@ -1119,7 +1120,7 @@ static int send_palette_rect(VncState *vs, int x, int y,
+
+ palette_iter(palette, write_palette, &priv);
+ vnc_write(vs, header, sizeof(header));
+- tight_encode_indexed_rect16(vs->tight.tight.buffer, w * h, palette);
++ tight_encode_indexed_rect16(vs->tight->tight.buffer, w * h, palette);
+ break;
+ }
+ default:
+@@ -1127,7 +1128,7 @@ static int send_palette_rect(VncState *vs, int x, int y,
+ break;
+ }
+ bytes = w * h;
+- vs->tight.tight.offset = bytes;
++ vs->tight->tight.offset = bytes;
+
+ bytes = tight_compress_data(vs, stream, bytes,
+ level, Z_DEFAULT_STRATEGY);
+@@ -1146,7 +1147,7 @@ static int send_palette_rect(VncState *vs, int x, int y,
+ static void jpeg_init_destination(j_compress_ptr cinfo)
+ {
+ VncState *vs = cinfo->client_data;
+- Buffer *buffer = &vs->tight.jpeg;
++ Buffer *buffer = &vs->tight->jpeg;
+
+ cinfo->dest->next_output_byte = (JOCTET *)buffer->buffer + buffer->offset;
+ cinfo->dest->free_in_buffer = (size_t)(buffer->capacity - buffer->offset);
+@@ -1156,7 +1157,7 @@ static void jpeg_init_destination(j_compress_ptr cinfo)
+ static boolean jpeg_empty_output_buffer(j_compress_ptr cinfo)
+ {
+ VncState *vs = cinfo->client_data;
+- Buffer *buffer = &vs->tight.jpeg;
++ Buffer *buffer = &vs->tight->jpeg;
+
+ buffer->offset = buffer->capacity;
+ buffer_reserve(buffer, 2048);
+@@ -1168,7 +1169,7 @@ static boolean jpeg_empty_output_buffer(j_compress_ptr cinfo)
+ static void jpeg_term_destination(j_compress_ptr cinfo)
+ {
+ VncState *vs = cinfo->client_data;
+- Buffer *buffer = &vs->tight.jpeg;
++ Buffer *buffer = &vs->tight->jpeg;
+
+ buffer->offset = buffer->capacity - cinfo->dest->free_in_buffer;
+ }
+@@ -1187,7 +1188,7 @@ static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int quality)
+ return send_full_color_rect(vs, x, y, w, h);
+ }
+
+- buffer_reserve(&vs->tight.jpeg, 2048);
++ buffer_reserve(&vs->tight->jpeg, 2048);
+
+ cinfo.err = jpeg_std_error(&jerr);
+ jpeg_create_compress(&cinfo);
+@@ -1222,9 +1223,9 @@ static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int quality)
+
+ vnc_write_u8(vs, VNC_TIGHT_JPEG << 4);
+
+- tight_send_compact_size(vs, vs->tight.jpeg.offset);
+- vnc_write(vs, vs->tight.jpeg.buffer, vs->tight.jpeg.offset);
+- buffer_reset(&vs->tight.jpeg);
++ tight_send_compact_size(vs, vs->tight->jpeg.offset);
++ vnc_write(vs, vs->tight->jpeg.buffer, vs->tight->jpeg.offset);
++ buffer_reset(&vs->tight->jpeg);
+
+ return 1;
+ }
+@@ -1240,7 +1241,7 @@ static void write_png_palette(int idx, uint32_t pix, void *opaque)
+ VncState *vs = priv->vs;
+ png_colorp color = &priv->png_palette[idx];
+
+- if (vs->tight.pixel24)
++ if (vs->tight->pixel24)
+ {
+ color->red = (pix >> vs->client_pf.rshift) & vs->client_pf.rmax;
+ color->green = (pix >> vs->client_pf.gshift) & vs->client_pf.gmax;
+@@ -1267,10 +1268,10 @@ static void png_write_data(png_structp png_ptr, png_bytep data,
+ {
+ VncState *vs = png_get_io_ptr(png_ptr);
+
+- buffer_reserve(&vs->tight.png, vs->tight.png.offset + length);
+- memcpy(vs->tight.png.buffer + vs->tight.png.offset, data, length);
++ buffer_reserve(&vs->tight->png, vs->tight->png.offset + length);
++ memcpy(vs->tight->png.buffer + vs->tight->png.offset, data, length);
+
+- vs->tight.png.offset += length;
++ vs->tight->png.offset += length;
+ }
+
+ static void png_flush_data(png_structp png_ptr)
+@@ -1295,8 +1296,8 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h,
+ png_infop info_ptr;
+ png_colorp png_palette = NULL;
+ pixman_image_t *linebuf;
+- int level = tight_png_conf[vs->tight.compression].png_zlib_level;
+- int filters = tight_png_conf[vs->tight.compression].png_filters;
++ int level = tight_png_conf[vs->tight->compression].png_zlib_level;
++ int filters = tight_png_conf[vs->tight->compression].png_filters;
+ uint8_t *buf;
+ int dy;
+
+@@ -1340,21 +1341,23 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h,
+ png_set_PLTE(png_ptr, info_ptr, png_palette, palette_size(palette));
+
+ if (vs->client_pf.bytes_per_pixel == 4) {
+- tight_encode_indexed_rect32(vs->tight.tight.buffer, w * h, palette);
++ tight_encode_indexed_rect32(vs->tight->tight.buffer, w * h,
++ palette);
+ } else {
+- tight_encode_indexed_rect16(vs->tight.tight.buffer, w * h, palette);
++ tight_encode_indexed_rect16(vs->tight->tight.buffer, w * h,
++ palette);
+ }
+ }
+
+ png_write_info(png_ptr, info_ptr);
+
+- buffer_reserve(&vs->tight.png, 2048);
++ buffer_reserve(&vs->tight->png, 2048);
+ linebuf = qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, w);
+ buf = (uint8_t *)pixman_image_get_data(linebuf);
+ for (dy = 0; dy < h; dy++)
+ {
+ if (color_type == PNG_COLOR_TYPE_PALETTE) {
+- memcpy(buf, vs->tight.tight.buffer + (dy * w), w);
++ memcpy(buf, vs->tight->tight.buffer + (dy * w), w);
+ } else {
+ qemu_pixman_linebuf_fill(linebuf, vs->vd->server, w, x, y + dy);
+ }
+@@ -1372,27 +1375,27 @@ static int send_png_rect(VncState *vs, int x, int y, int w, int h,
+
+ vnc_write_u8(vs, VNC_TIGHT_PNG << 4);
+
+- tight_send_compact_size(vs, vs->tight.png.offset);
+- vnc_write(vs, vs->tight.png.buffer, vs->tight.png.offset);
+- buffer_reset(&vs->tight.png);
++ tight_send_compact_size(vs, vs->tight->png.offset);
++ vnc_write(vs, vs->tight->png.buffer, vs->tight->png.offset);
++ buffer_reset(&vs->tight->png);
+ return 1;
+ }
+ #endif /* CONFIG_VNC_PNG */
+
+ static void vnc_tight_start(VncState *vs)
+ {
+- buffer_reset(&vs->tight.tight);
++ buffer_reset(&vs->tight->tight);
+
+ // make the output buffer be the zlib buffer, so we can compress it later
+- vs->tight.tmp = vs->output;
+- vs->output = vs->tight.tight;
++ vs->tight->tmp = vs->output;
++ vs->output = vs->tight->tight;
+ }
+
+ static void vnc_tight_stop(VncState *vs)
+ {
+ // switch back to normal output/zlib buffers
+- vs->tight.tight = vs->output;
+- vs->output = vs->tight.tmp;
++ vs->tight->tight = vs->output;
++ vs->output = vs->tight->tmp;
+ }
+
+ static int send_sub_rect_nojpeg(VncState *vs, int x, int y, int w, int h,
+@@ -1426,9 +1429,9 @@ static int send_sub_rect_jpeg(VncState *vs, int x, int y, int w, int h,
+ int ret;
+
+ if (colors == 0) {
+- if (force || (tight_jpeg_conf[vs->tight.quality].jpeg_full &&
++ if (force || (tight_jpeg_conf[vs->tight->quality].jpeg_full &&
+ tight_detect_smooth_image(vs, w, h))) {
+- int quality = tight_conf[vs->tight.quality].jpeg_quality;
++ int quality = tight_conf[vs->tight->quality].jpeg_quality;
+
+ ret = send_jpeg_rect(vs, x, y, w, h, quality);
+ } else {
+@@ -1440,9 +1443,9 @@ static int send_sub_rect_jpeg(VncState *vs, int x, int y, int w, int h,
+ ret = send_mono_rect(vs, x, y, w, h, bg, fg);
+ } else if (colors <= 256) {
+ if (force || (colors > 96 &&
+- tight_jpeg_conf[vs->tight.quality].jpeg_idx &&
++ tight_jpeg_conf[vs->tight->quality].jpeg_idx &&
+ tight_detect_smooth_image(vs, w, h))) {
+- int quality = tight_conf[vs->tight.quality].jpeg_quality;
++ int quality = tight_conf[vs->tight->quality].jpeg_quality;
+
+ ret = send_jpeg_rect(vs, x, y, w, h, quality);
+ } else {
+@@ -1480,20 +1483,20 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h)
+ qemu_thread_atexit_add(&vnc_tight_cleanup_notifier);
+ }
+
+- vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type);
++ vnc_framebuffer_update(vs, x, y, w, h, vs->tight->type);
+
+ vnc_tight_start(vs);
+ vnc_raw_send_framebuffer_update(vs, x, y, w, h);
+ vnc_tight_stop(vs);
+
+ #ifdef CONFIG_VNC_JPEG
+- if (!vs->vd->non_adaptive && vs->tight.quality != (uint8_t)-1) {
++ if (!vs->vd->non_adaptive && vs->tight->quality != (uint8_t)-1) {
+ double freq = vnc_update_freq(vs, x, y, w, h);
+
+- if (freq < tight_jpeg_conf[vs->tight.quality].jpeg_freq_min) {
++ if (freq < tight_jpeg_conf[vs->tight->quality].jpeg_freq_min) {
+ allow_jpeg = false;
+ }
+- if (freq >= tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) {
++ if (freq >= tight_jpeg_conf[vs->tight->quality].jpeg_freq_threshold) {
+ force_jpeg = true;
+ vnc_sent_lossy_rect(vs, x, y, w, h);
+ }
+@@ -1503,7 +1506,7 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h)
+ colors = tight_fill_palette(vs, x, y, w * h, &bg, &fg, color_count_palette);
+
+ #ifdef CONFIG_VNC_JPEG
+- if (allow_jpeg && vs->tight.quality != (uint8_t)-1) {
++ if (allow_jpeg && vs->tight->quality != (uint8_t)-1) {
+ ret = send_sub_rect_jpeg(vs, x, y, w, h, bg, fg, colors,
+ color_count_palette, force_jpeg);
+ } else {
+@@ -1520,7 +1523,7 @@ static int send_sub_rect(VncState *vs, int x, int y, int w, int h)
+
+ static int send_sub_rect_solid(VncState *vs, int x, int y, int w, int h)
+ {
+- vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type);
++ vnc_framebuffer_update(vs, x, y, w, h, vs->tight->type);
+
+ vnc_tight_start(vs);
+ vnc_raw_send_framebuffer_update(vs, x, y, w, h);
+@@ -1538,8 +1541,8 @@ static int send_rect_simple(VncState *vs, int x, int y, int w, int h,
+ int rw, rh;
+ int n = 0;
+
+- max_size = tight_conf[vs->tight.compression].max_rect_size;
+- max_width = tight_conf[vs->tight.compression].max_rect_width;
++ max_size = tight_conf[vs->tight->compression].max_rect_size;
++ max_width = tight_conf[vs->tight->compression].max_rect_width;
+
+ if (split && (w > max_width || w * h > max_size)) {
+ max_sub_width = (w > max_width) ? max_width : w;
+@@ -1648,16 +1651,16 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+
+ if (vs->client_pf.bytes_per_pixel == 4 && vs->client_pf.rmax == 0xFF &&
+ vs->client_pf.bmax == 0xFF && vs->client_pf.gmax == 0xFF) {
+- vs->tight.pixel24 = true;
++ vs->tight->pixel24 = true;
+ } else {
+- vs->tight.pixel24 = false;
++ vs->tight->pixel24 = false;
+ }
+
+ #ifdef CONFIG_VNC_JPEG
+- if (vs->tight.quality != (uint8_t)-1) {
++ if (vs->tight->quality != (uint8_t)-1) {
+ double freq = vnc_update_freq(vs, x, y, w, h);
+
+- if (freq > tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) {
++ if (freq > tight_jpeg_conf[vs->tight->quality].jpeg_freq_threshold) {
+ return send_rect_simple(vs, x, y, w, h, false);
+ }
+ }
+@@ -1669,8 +1672,8 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+
+ /* Calculate maximum number of rows in one non-solid rectangle. */
+
+- max_rows = tight_conf[vs->tight.compression].max_rect_size;
+- max_rows /= MIN(tight_conf[vs->tight.compression].max_rect_width, w);
++ max_rows = tight_conf[vs->tight->compression].max_rect_size;
++ max_rows /= MIN(tight_conf[vs->tight->compression].max_rect_width, w);
+
+ return find_large_solid_color_rect(vs, x, y, w, h, max_rows);
+ }
+@@ -1678,33 +1681,33 @@ static int tight_send_framebuffer_update(VncState *vs, int x, int y,
+ int vnc_tight_send_framebuffer_update(VncState *vs, int x, int y,
+ int w, int h)
+ {
+- vs->tight.type = VNC_ENCODING_TIGHT;
++ vs->tight->type = VNC_ENCODING_TIGHT;
+ return tight_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ int vnc_tight_png_send_framebuffer_update(VncState *vs, int x, int y,
+ int w, int h)
+ {
+- vs->tight.type = VNC_ENCODING_TIGHT_PNG;
++ vs->tight->type = VNC_ENCODING_TIGHT_PNG;
+ return tight_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ void vnc_tight_clear(VncState *vs)
+ {
+ int i;
+- for (i=0; i<ARRAY_SIZE(vs->tight.stream); i++) {
+- if (vs->tight.stream[i].opaque) {
+- deflateEnd(&vs->tight.stream[i]);
++ for (i = 0; i < ARRAY_SIZE(vs->tight->stream); i++) {
++ if (vs->tight->stream[i].opaque) {
++ deflateEnd(&vs->tight->stream[i]);
+ }
+ }
+
+- buffer_free(&vs->tight.tight);
+- buffer_free(&vs->tight.zlib);
+- buffer_free(&vs->tight.gradient);
++ buffer_free(&vs->tight->tight);
++ buffer_free(&vs->tight->zlib);
++ buffer_free(&vs->tight->gradient);
+ #ifdef CONFIG_VNC_JPEG
+- buffer_free(&vs->tight.jpeg);
++ buffer_free(&vs->tight->jpeg);
+ #endif
+ #ifdef CONFIG_VNC_PNG
+- buffer_free(&vs->tight.png);
++ buffer_free(&vs->tight->png);
+ #endif
+ }
+diff --git a/ui/vnc-enc-zlib.c b/ui/vnc-enc-zlib.c
+index 33e9df2..900ae5b 100644
+--- a/ui/vnc-enc-zlib.c
++++ b/ui/vnc-enc-zlib.c
+@@ -76,7 +76,8 @@ static int vnc_zlib_stop(VncState *vs)
+ zstream->zalloc = vnc_zlib_zalloc;
+ zstream->zfree = vnc_zlib_zfree;
+
+- err = deflateInit2(zstream, vs->tight.compression, Z_DEFLATED, MAX_WBITS,
++ err = deflateInit2(zstream, vs->tight->compression, Z_DEFLATED,
++ MAX_WBITS,
+ MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
+
+ if (err != Z_OK) {
+@@ -84,16 +85,16 @@ static int vnc_zlib_stop(VncState *vs)
+ return -1;
+ }
+
+- vs->zlib.level = vs->tight.compression;
++ vs->zlib.level = vs->tight->compression;
+ zstream->opaque = vs;
+ }
+
+- if (vs->tight.compression != vs->zlib.level) {
+- if (deflateParams(zstream, vs->tight.compression,
++ if (vs->tight->compression != vs->zlib.level) {
++ if (deflateParams(zstream, vs->tight->compression,
+ Z_DEFAULT_STRATEGY) != Z_OK) {
+ return -1;
+ }
+- vs->zlib.level = vs->tight.compression;
++ vs->zlib.level = vs->tight->compression;
+ }
+
+ // reserve memory in output buffer
+diff --git a/ui/vnc-enc-zrle.c b/ui/vnc-enc-zrle.c
+index 7493a84..17fd28a 100644
+--- a/ui/vnc-enc-zrle.c
++++ b/ui/vnc-enc-zrle.c
+@@ -37,18 +37,18 @@ static const int bits_per_packed_pixel[] = {
+
+ static void vnc_zrle_start(VncState *vs)
+ {
+- buffer_reset(&vs->zrle.zrle);
++ buffer_reset(&vs->zrle->zrle);
+
+ /* make the output buffer be the zlib buffer, so we can compress it later */
+- vs->zrle.tmp = vs->output;
+- vs->output = vs->zrle.zrle;
++ vs->zrle->tmp = vs->output;
++ vs->output = vs->zrle->zrle;
+ }
+
+ static void vnc_zrle_stop(VncState *vs)
+ {
+ /* switch back to normal output/zlib buffers */
+- vs->zrle.zrle = vs->output;
+- vs->output = vs->zrle.tmp;
++ vs->zrle->zrle = vs->output;
++ vs->output = vs->zrle->tmp;
+ }
+
+ static void *zrle_convert_fb(VncState *vs, int x, int y, int w, int h,
+@@ -56,24 +56,24 @@ static void *zrle_convert_fb(VncState *vs, int x, int y, int w, int h,
+ {
+ Buffer tmp;
+
+- buffer_reset(&vs->zrle.fb);
+- buffer_reserve(&vs->zrle.fb, w * h * bpp + bpp);
++ buffer_reset(&vs->zrle->fb);
++ buffer_reserve(&vs->zrle->fb, w * h * bpp + bpp);
+
+ tmp = vs->output;
+- vs->output = vs->zrle.fb;
++ vs->output = vs->zrle->fb;
+
+ vnc_raw_send_framebuffer_update(vs, x, y, w, h);
+
+- vs->zrle.fb = vs->output;
++ vs->zrle->fb = vs->output;
+ vs->output = tmp;
+- return vs->zrle.fb.buffer;
++ return vs->zrle->fb.buffer;
+ }
+
+ static int zrle_compress_data(VncState *vs, int level)
+ {
+- z_streamp zstream = &vs->zrle.stream;
++ z_streamp zstream = &vs->zrle->stream;
+
+- buffer_reset(&vs->zrle.zlib);
++ buffer_reset(&vs->zrle->zlib);
+
+ if (zstream->opaque != vs) {
+ int err;
+@@ -93,13 +93,13 @@ static int zrle_compress_data(VncState *vs, int level)
+ }
+
+ /* reserve memory in output buffer */
+- buffer_reserve(&vs->zrle.zlib, vs->zrle.zrle.offset + 64);
++ buffer_reserve(&vs->zrle->zlib, vs->zrle->zrle.offset + 64);
+
+ /* set pointers */
+- zstream->next_in = vs->zrle.zrle.buffer;
+- zstream->avail_in = vs->zrle.zrle.offset;
+- zstream->next_out = vs->zrle.zlib.buffer + vs->zrle.zlib.offset;
+- zstream->avail_out = vs->zrle.zlib.capacity - vs->zrle.zlib.offset;
++ zstream->next_in = vs->zrle->zrle.buffer;
++ zstream->avail_in = vs->zrle->zrle.offset;
++ zstream->next_out = vs->zrle->zlib.buffer + vs->zrle->zlib.offset;
++ zstream->avail_out = vs->zrle->zlib.capacity - vs->zrle->zlib.offset;
+ zstream->data_type = Z_BINARY;
+
+ /* start encoding */
+@@ -108,8 +108,8 @@ static int zrle_compress_data(VncState *vs, int level)
+ return -1;
+ }
+
+- vs->zrle.zlib.offset = vs->zrle.zlib.capacity - zstream->avail_out;
+- return vs->zrle.zlib.offset;
++ vs->zrle->zlib.offset = vs->zrle->zlib.capacity - zstream->avail_out;
++ return vs->zrle->zlib.offset;
+ }
+
+ /* Try to work out whether to use RLE and/or a palette. We do this by
+@@ -259,14 +259,14 @@ static int zrle_send_framebuffer_update(VncState *vs, int x, int y,
+ size_t bytes;
+ int zywrle_level;
+
+- if (vs->zrle.type == VNC_ENCODING_ZYWRLE) {
+- if (!vs->vd->lossy || vs->tight.quality == (uint8_t)-1
+- || vs->tight.quality == 9) {
++ if (vs->zrle->type == VNC_ENCODING_ZYWRLE) {
++ if (!vs->vd->lossy || vs->tight->quality == (uint8_t)-1
++ || vs->tight->quality == 9) {
+ zywrle_level = 0;
+- vs->zrle.type = VNC_ENCODING_ZRLE;
+- } else if (vs->tight.quality < 3) {
++ vs->zrle->type = VNC_ENCODING_ZRLE;
++ } else if (vs->tight->quality < 3) {
+ zywrle_level = 3;
+- } else if (vs->tight.quality < 6) {
++ } else if (vs->tight->quality < 6) {
+ zywrle_level = 2;
+ } else {
+ zywrle_level = 1;
+@@ -337,30 +337,30 @@ static int zrle_send_framebuffer_update(VncState *vs, int x, int y,
+
+ vnc_zrle_stop(vs);
+ bytes = zrle_compress_data(vs, Z_DEFAULT_COMPRESSION);
+- vnc_framebuffer_update(vs, x, y, w, h, vs->zrle.type);
++ vnc_framebuffer_update(vs, x, y, w, h, vs->zrle->type);
+ vnc_write_u32(vs, bytes);
+- vnc_write(vs, vs->zrle.zlib.buffer, vs->zrle.zlib.offset);
++ vnc_write(vs, vs->zrle->zlib.buffer, vs->zrle->zlib.offset);
+ return 1;
+ }
+
+ int vnc_zrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
+ {
+- vs->zrle.type = VNC_ENCODING_ZRLE;
++ vs->zrle->type = VNC_ENCODING_ZRLE;
+ return zrle_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ int vnc_zywrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
+ {
+- vs->zrle.type = VNC_ENCODING_ZYWRLE;
++ vs->zrle->type = VNC_ENCODING_ZYWRLE;
+ return zrle_send_framebuffer_update(vs, x, y, w, h);
+ }
+
+ void vnc_zrle_clear(VncState *vs)
+ {
+- if (vs->zrle.stream.opaque) {
+- deflateEnd(&vs->zrle.stream);
++ if (vs->zrle->stream.opaque) {
++ deflateEnd(&vs->zrle->stream);
+ }
+- buffer_free(&vs->zrle.zrle);
+- buffer_free(&vs->zrle.fb);
+- buffer_free(&vs->zrle.zlib);
++ buffer_free(&vs->zrle->zrle);
++ buffer_free(&vs->zrle->fb);
++ buffer_free(&vs->zrle->zlib);
+ }
+diff --git a/ui/vnc-enc-zrle.inc.c b/ui/vnc-enc-zrle.inc.c
+index abf6b86..c107d8a 100644
+--- a/ui/vnc-enc-zrle.inc.c
++++ b/ui/vnc-enc-zrle.inc.c
+@@ -96,7 +96,7 @@ static void ZRLE_ENCODE(VncState *vs, int x, int y, int w, int h,
+ static void ZRLE_ENCODE_TILE(VncState *vs, ZRLE_PIXEL *data, int w, int h,
+ int zywrle_level)
+ {
+- VncPalette *palette = &vs->zrle.palette;
++ VncPalette *palette = &vs->zrle->palette;
+
+ int runs = 0;
+ int single_pixels = 0;
+diff --git a/ui/vnc.c b/ui/vnc.c
+index bc43c4c..87b8045 100644
+--- a/ui/vnc.c
++++ b/ui/vnc.c
+@@ -1307,6 +1307,8 @@ void vnc_disconnect_finish(VncState *vs)
+ object_unref(OBJECT(vs->sioc));
+ vs->sioc = NULL;
+ vs->magic = 0;
++ g_free(vs->zrle);
++ g_free(vs->tight);
+ g_free(vs);
+ }
+
+@@ -2058,8 +2060,8 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings)
+
+ vs->features = 0;
+ vs->vnc_encoding = 0;
+- vs->tight.compression = 9;
+- vs->tight.quality = -1; /* Lossless by default */
++ vs->tight->compression = 9;
++ vs->tight->quality = -1; /* Lossless by default */
+ vs->absolute = -1;
+
+ /*
+@@ -2127,11 +2129,11 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings)
+ vs->features |= VNC_FEATURE_LED_STATE_MASK;
+ break;
+ case VNC_ENCODING_COMPRESSLEVEL0 ... VNC_ENCODING_COMPRESSLEVEL0 + 9:
+- vs->tight.compression = (enc & 0x0F);
++ vs->tight->compression = (enc & 0x0F);
+ break;
+ case VNC_ENCODING_QUALITYLEVEL0 ... VNC_ENCODING_QUALITYLEVEL0 + 9:
+ if (vs->vd->lossy) {
+- vs->tight.quality = (enc & 0x0F);
++ vs->tight->quality = (enc & 0x0F);
+ }
+ break;
+ default:
+@@ -3034,6 +3036,8 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc,
+ int i;
+
+ trace_vnc_client_connect(vs, sioc);
++ vs->zrle = g_new0(VncZrle, 1);
++ vs->tight = g_new0(VncTight, 1);
+ vs->magic = VNC_MAGIC;
+ vs->sioc = sioc;
+ object_ref(OBJECT(vs->sioc));
+@@ -3045,19 +3049,19 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc,
+ buffer_init(&vs->output, "vnc-output/%p", sioc);
+ buffer_init(&vs->jobs_buffer, "vnc-jobs_buffer/%p", sioc);
+
+- buffer_init(&vs->tight.tight, "vnc-tight/%p", sioc);
+- buffer_init(&vs->tight.zlib, "vnc-tight-zlib/%p", sioc);
+- buffer_init(&vs->tight.gradient, "vnc-tight-gradient/%p", sioc);
++ buffer_init(&vs->tight->tight, "vnc-tight/%p", sioc);
++ buffer_init(&vs->tight->zlib, "vnc-tight-zlib/%p", sioc);
++ buffer_init(&vs->tight->gradient, "vnc-tight-gradient/%p", sioc);
+ #ifdef CONFIG_VNC_JPEG
+- buffer_init(&vs->tight.jpeg, "vnc-tight-jpeg/%p", sioc);
++ buffer_init(&vs->tight->jpeg, "vnc-tight-jpeg/%p", sioc);
+ #endif
+ #ifdef CONFIG_VNC_PNG
+- buffer_init(&vs->tight.png, "vnc-tight-png/%p", sioc);
++ buffer_init(&vs->tight->png, "vnc-tight-png/%p", sioc);
+ #endif
+ buffer_init(&vs->zlib.zlib, "vnc-zlib/%p", sioc);
+- buffer_init(&vs->zrle.zrle, "vnc-zrle/%p", sioc);
+- buffer_init(&vs->zrle.fb, "vnc-zrle-fb/%p", sioc);
+- buffer_init(&vs->zrle.zlib, "vnc-zrle-zlib/%p", sioc);
++ buffer_init(&vs->zrle->zrle, "vnc-zrle/%p", sioc);
++ buffer_init(&vs->zrle->fb, "vnc-zrle-fb/%p", sioc);
++ buffer_init(&vs->zrle->zlib, "vnc-zrle-zlib/%p", sioc);
+
+ if (skipauth) {
+ vs->auth = VNC_AUTH_NONE;
+diff --git a/ui/vnc.h b/ui/vnc.h
+index 8643860..fea79c2 100644
+--- a/ui/vnc.h
++++ b/ui/vnc.h
+@@ -338,10 +338,10 @@ struct VncState
+ /* Encoding specific, if you add something here, don't forget to
+ * update vnc_async_encoding_start()
+ */
+- VncTight tight;
++ VncTight *tight;
+ VncZlib zlib;
+ VncHextile hextile;
+- VncZrle zrle;
++ VncZrle *zrle;
+ VncZywrle zywrle;
+
+ Notifier mouse_mode_notifier;
+--
+1.8.3.1
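The heart of the fix above is ownership: the zlib stream must be torn down at the same address it was initialised at, so the codec state is heap-allocated once per connection and shared by pointer instead of being copied by value. A reduced C sketch of that pattern (hypothetical Codec/Conn names, not the QEMU structures):

#include <stdlib.h>
#include <zlib.h>

/* Hypothetical names; the point is that the z_stream lives at one heap
 * address for the whole lifetime of the connection, so deflateEnd() is
 * handed the exact stream deflateInit2() initialised. */
typedef struct Codec {
    z_stream strm;
    int initialised;
} Codec;

typedef struct Conn {
    Codec *codec;        /* shared by pointer, never copied by value */
} Conn;

static Conn *conn_new(void)
{
    Conn *c = calloc(1, sizeof(*c));
    if (c) {
        c->codec = calloc(1, sizeof(*c->codec));
    }
    return c;
}

static int conn_start_compression(Conn *c, int level)
{
    if (!c || !c->codec) {
        return -1;
    }
    if (deflateInit2(&c->codec->strm, level, Z_DEFLATED, MAX_WBITS,
                     MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK) {
        return -1;
    }
    c->codec->initialised = 1;
    return 0;
}

static void conn_free(Conn *c)
{
    if (!c) {
        return;
    }
    if (c->codec && c->codec->initialised) {
        deflateEnd(&c->codec->strm);   /* same address deflateInit2() saw */
    }
    free(c->codec);
    free(c);
}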
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-10702.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-10702.patch
new file mode 100644
index 0000000000..21a3ceb30d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-10702.patch
@@ -0,0 +1,52 @@
+From de0b1bae6461f67243282555475f88b2384a1eb9 Mon Sep 17 00:00:00 2001
+From: Vincent Dehors <vincent.dehors@smile.fr>
+Date: Thu, 23 Jan 2020 15:22:38 +0000
+Subject: [PATCH] target/arm: Fix PAuth sbox functions
+
+In the PAC computation, sbox was applied over wrong bits.
+As this is a 4-bit sbox, bit index should be incremented by 4 instead of 16.
+
+Test vector from QARMA paper (https://eprint.iacr.org/2016/444.pdf) was
+used to verify one computation of the pauth_computepac() function which
+uses sbox2.
+
+Launchpad: https://bugs.launchpad.net/bugs/1859713
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Vincent DEHORS <vincent.dehors@smile.fr>
+Signed-off-by: Adrien GRASSEIN <adrien.grassein@smile.fr>
+Message-id: 20200116230809.19078-2-richard.henderson@linaro.org
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=patch;h=de0b1bae6461f67243282555475f88b2384a1eb9]
+CVE: CVE-2020-10702
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ target/arm/pauth_helper.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
+index d3194f2..0a5f41e 100644
+--- a/target/arm/pauth_helper.c
++++ b/target/arm/pauth_helper.c
+@@ -89,7 +89,7 @@ static uint64_t pac_sub(uint64_t i)
+ uint64_t o = 0;
+ int b;
+
+- for (b = 0; b < 64; b += 16) {
++ for (b = 0; b < 64; b += 4) {
+ o |= (uint64_t)sub[(i >> b) & 0xf] << b;
+ }
+ return o;
+@@ -104,7 +104,7 @@ static uint64_t pac_inv_sub(uint64_t i)
+ uint64_t o = 0;
+ int b;
+
+- for (b = 0; b < 64; b += 16) {
++ for (b = 0; b < 64; b += 4) {
+ o |= (uint64_t)inv_sub[(i >> b) & 0xf] << b;
+ }
+ return o;
+--
+1.8.3.1
+
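The one-character change above matters because a 4-bit S-box covers a single nibble; the loop must therefore advance 4 bits at a time to substitute all 16 nibbles of the 64-bit input. A small self-contained sketch of that loop (toy S-box, illustration only):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sub_nibbles(uint64_t i, const uint8_t sbox[16])
{
    uint64_t o = 0;
    /* a 4-bit S-box covers one nibble, so step by 4 to hit all 16 nibbles;
     * stepping by 16 (the original bug) would transform only 4 of them */
    for (int b = 0; b < 64; b += 4) {
        o |= (uint64_t)sbox[(i >> b) & 0xf] << b;
    }
    return o;
}

int main(void)
{
    static const uint8_t toy_sbox[16] = {   /* illustrative permutation only */
        0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe,
        0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa
    };
    printf("%016" PRIx64 "\n", sub_nibbles(0x0123456789abcdefULL, toy_sbox));
    return 0;
}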
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-10756.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-10756.patch
new file mode 100644
index 0000000000..306aef061b
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-10756.patch
@@ -0,0 +1,40 @@
+From c7ede54cbd2e2b25385325600958ba0124e31cc0 Mon Sep 17 00:00:00 2001
+From: Ralf Haferkamp <rhafer@suse.com>
+Date: Fri, 3 Jul 2020 14:51:16 +0200
+Subject: [PATCH] Drop bogus IPv6 messages
+
+Drop IPv6 messages shorter than what's mentioned in the payload
+length header (+ the size of the IPv6 header). They're invalid and could
+lead to data leakage in icmp6_send_echoreply().
+
+CVE: CVE-2020-10756
+Upstream-Status: Backport
+https://gitlab.freedesktop.org/slirp/libslirp/-/commit/c7ede54cbd2e2b25385325600958ba0124e31cc0
+
+[SG: Based on libslirp commit c7ede54cbd2e2b25385325600958ba0124e31cc0 and adjusted context]
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ slirp/src/ip6_input.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/slirp/src/ip6_input.c b/slirp/src/ip6_input.c
+index d9d2b7e9..0f2b1785 100644
+--- a/slirp/src/ip6_input.c
++++ b/slirp/src/ip6_input.c
+@@ -49,6 +49,13 @@ void ip6_input(struct mbuf *m)
+ goto bad;
+ }
+
++ // Check if the message size is big enough to hold what's
++ // set in the payload length header. If not this is an invalid
++ // packet
++ if (m->m_len < ntohs(ip6->ip_pl) + sizeof(struct ip6)) {
++ goto bad;
++ }
++
+ /* check ip_ttl for a correct ICMP reply */
+ if (ip6->ip_hl == 0) {
+ icmp6_send_error(m, ICMP6_TIMXCEED, ICMP6_TIMXCEED_INTRANS);
+--
+2.17.1
+
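The added check above compares the actual buffer length against the length the IPv6 header claims. A reduced sketch of the same bound (simplified packet layout, not the libslirp code):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define IP6_HDR_LEN 40                 /* fixed IPv6 header is 40 bytes */

/* The buffer must be at least as long as the fixed header plus the payload
 * length the header advertises, otherwise later handlers could read past
 * the end of the packet. */
static bool ip6_length_ok(const uint8_t *pkt, size_t pkt_len)
{
    uint16_t plen_net;

    if (pkt_len < IP6_HDR_LEN) {
        return false;
    }
    memcpy(&plen_net, pkt + 4, sizeof(plen_net));   /* payload length field */
    return pkt_len >= (size_t)ntohs(plen_net) + IP6_HDR_LEN;
}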
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-11869.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-11869.patch
new file mode 100644
index 0000000000..ca7ffed934
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-11869.patch
@@ -0,0 +1,97 @@
+From ac2071c3791b67fc7af78b8ceb320c01ca1b5df7 Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Mon, 6 Apr 2020 22:34:26 +0200
+Subject: [PATCH] ati-vga: Fix checks in ati_2d_blt() to avoid crash
+
+In some corner cases (that never happen during normal operation but a
+malicious guest could program wrong values) pixman functions were
+called with parameters that result in a crash. Fix this and add more
+checks to disallow such cases.
+
+Reported-by: Ziming Zhang <ezrakiez@gmail.com>
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Message-id: 20200406204029.19559747D5D@zero.eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=ac2071c3791b67fc7af78b8ceb320c01ca1b5df7]
+CVE: CVE-2020-11869
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ hw/display/ati_2d.c | 37 ++++++++++++++++++++++++++-----------
+ 1 file changed, 26 insertions(+), 11 deletions(-)
+
+diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
+index 42e8231..23a8ae0 100644
+--- a/hw/display/ati_2d.c
++++ b/hw/display/ati_2d.c
+@@ -53,12 +53,20 @@ void ati_2d_blt(ATIVGAState *s)
+ s->vga.vbe_start_addr, surface_data(ds), surface_stride(ds),
+ surface_bits_per_pixel(ds),
+ (s->regs.dp_mix & GMC_ROP3_MASK) >> 16);
+- int dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ?
+- s->regs.dst_x : s->regs.dst_x + 1 - s->regs.dst_width);
+- int dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
+- s->regs.dst_y : s->regs.dst_y + 1 - s->regs.dst_height);
++ unsigned dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ?
++ s->regs.dst_x : s->regs.dst_x + 1 - s->regs.dst_width);
++ unsigned dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
++ s->regs.dst_y : s->regs.dst_y + 1 - s->regs.dst_height);
+ int bpp = ati_bpp_from_datatype(s);
++ if (!bpp) {
++ qemu_log_mask(LOG_GUEST_ERROR, "Invalid bpp\n");
++ return;
++ }
+ int dst_stride = DEFAULT_CNTL ? s->regs.dst_pitch : s->regs.default_pitch;
++ if (!dst_stride) {
++ qemu_log_mask(LOG_GUEST_ERROR, "Zero dest pitch\n");
++ return;
++ }
+ uint8_t *dst_bits = s->vga.vram_ptr + (DEFAULT_CNTL ?
+ s->regs.dst_offset : s->regs.default_offset);
+
+@@ -82,12 +90,16 @@ void ati_2d_blt(ATIVGAState *s)
+ switch (s->regs.dp_mix & GMC_ROP3_MASK) {
+ case ROP3_SRCCOPY:
+ {
+- int src_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ?
+- s->regs.src_x : s->regs.src_x + 1 - s->regs.dst_width);
+- int src_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
+- s->regs.src_y : s->regs.src_y + 1 - s->regs.dst_height);
++ unsigned src_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ?
++ s->regs.src_x : s->regs.src_x + 1 - s->regs.dst_width);
++ unsigned src_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
++ s->regs.src_y : s->regs.src_y + 1 - s->regs.dst_height);
+ int src_stride = DEFAULT_CNTL ?
+ s->regs.src_pitch : s->regs.default_pitch;
++ if (!src_stride) {
++ qemu_log_mask(LOG_GUEST_ERROR, "Zero source pitch\n");
++ return;
++ }
+ uint8_t *src_bits = s->vga.vram_ptr + (DEFAULT_CNTL ?
+ s->regs.src_offset : s->regs.default_offset);
+
+@@ -137,8 +149,10 @@ void ati_2d_blt(ATIVGAState *s)
+ dst_y * surface_stride(ds),
+ s->regs.dst_height * surface_stride(ds));
+ }
+- s->regs.dst_x += s->regs.dst_width;
+- s->regs.dst_y += s->regs.dst_height;
++ s->regs.dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ?
++ dst_x + s->regs.dst_width : dst_x);
++ s->regs.dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
++ dst_y + s->regs.dst_height : dst_y);
+ break;
+ }
+ case ROP3_PATCOPY:
+@@ -179,7 +193,8 @@ void ati_2d_blt(ATIVGAState *s)
+ dst_y * surface_stride(ds),
+ s->regs.dst_height * surface_stride(ds));
+ }
+- s->regs.dst_y += s->regs.dst_height;
++ s->regs.dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ?
++ dst_y + s->regs.dst_height : dst_y);
+ break;
+ }
+ default:
+--
+1.8.3.1
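The guards added above follow one pattern: validate every guest-programmable blit parameter (depth, pitch, geometry) before it reaches the copy loop. A compact sketch of that style of check (hypothetical register names, not the QEMU code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical register block; the real device has many more fields. */
struct blt_regs {
    unsigned bpp;         /* bytes per pixel, 0 means "invalid datatype" */
    unsigned dst_pitch;   /* bytes per destination scanline */
    unsigned width;
    unsigned height;
};

/* Reject parameter combinations a malicious guest could program before
 * they reach the copy loop / pixman call. */
static bool blt_params_ok(const struct blt_regs *r, size_t vram_size)
{
    if (r->bpp == 0 || r->dst_pitch == 0) {
        return false;                  /* would step by or divide by zero */
    }
    if (r->width == 0 || r->height == 0) {
        return false;
    }
    /* last byte the blit would touch must still be inside VRAM */
    uint64_t last = (uint64_t)(r->height - 1) * r->dst_pitch
                    + (uint64_t)r->width * r->bpp;
    return last <= vram_size;
}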
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-12829.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829.patch
new file mode 100644
index 0000000000..46e494dec0
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-12829.patch
@@ -0,0 +1,267 @@
+From b15a22bbcbe6a78dc3d88fe3134985e4cdd87de4 Mon Sep 17 00:00:00 2001
+From: BALATON Zoltan <balaton@eik.bme.hu>
+Date: Thu, 21 May 2020 21:39:44 +0200
+Subject: [PATCH] sm501: Replace hand written implementation with pixman
+where possible
+
+Besides being faster, this should also prevent malicious guests from
+abusing the 2D engine to overwrite data or cause a crash.
+
+Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
+Message-id:
+58666389b6cae256e4e972a32c05cf8aa51bffc0.1590089984.git.balaton@eik.bme.hu
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-12829
+[https://git.qemu.org/?p=qemu.git;a=commit;h=b15a22bbcbe6a78dc3d88fe3134985e4cdd87de4]
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ hw/display/sm501.c | 205 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 118 insertions(+), 87 deletions(-)
+
+diff --git a/hw/display/sm501.c b/hw/display/sm501.c
+index 5918f59..b52c7e8 100644
+--- a/hw/display/sm501.c
++++ b/hw/display/sm501.c
+@@ -702,12 +702,12 @@ static void sm501_2d_operation(SM501State *s)
+ /* obtain operation parameters */
+ int operation = (s->twoD_control >> 16) & 0x1f;
+ int rtl = s->twoD_control & 0x8000000;
+- int src_x = (s->twoD_source >> 16) & 0x01FFF;
+- int src_y = s->twoD_source & 0xFFFF;
+- int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
+- int dst_y = s->twoD_destination & 0xFFFF;
+- int operation_width = (s->twoD_dimension >> 16) & 0x1FFF;
+- int operation_height = s->twoD_dimension & 0xFFFF;
++ unsigned int src_x = (s->twoD_source >> 16) & 0x01FFF;
++ unsigned int src_y = s->twoD_source & 0xFFFF;
++ unsigned int dst_x = (s->twoD_destination >> 16) & 0x01FFF;
++ unsigned int dst_y = s->twoD_destination & 0xFFFF;
++ unsigned int operation_width = (s->twoD_dimension >> 16) & 0x1FFF;
++ unsigned int operation_height = s->twoD_dimension & 0xFFFF;
+ uint32_t color = s->twoD_foreground;
+ int format_flags = (s->twoD_stretch >> 20) & 0x3;
+ int addressing = (s->twoD_stretch >> 16) & 0xF;
+@@ -719,10 +719,8 @@ static void sm501_2d_operation(SM501State *s)
+ uint32_t dst_base = s->twoD_destination_base & 0x03FFFFFF;
+
+ /* get frame buffer info */
+- uint8_t *src = s->local_mem + src_base;
+- uint8_t *dst = s->local_mem + dst_base;
+- int src_width = s->twoD_pitch & 0x1FFF;
+- int dst_width = (s->twoD_pitch >> 16) & 0x1FFF;
++ unsigned int src_width = s->twoD_pitch & 0x1FFF;
++ unsigned int dst_width = (s->twoD_pitch >> 16) & 0x1FFF;
+ int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0;
+ int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt);
+
+@@ -731,95 +729,128 @@ static void sm501_2d_operation(SM501State *s)
+ abort();
+ }
+
+- if (rop_mode == 0) {
+- if (rop != 0xcc) {
+- /* Anything other than plain copies are not supported */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop3 mode with rop %x is not "
+- "supported.\n", rop);
+- }
+- } else {
+- if (rop2_source_is_pattern && rop != 0x5) {
+- /* For pattern source, we support only inverse dest */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop2 source being the pattern and "
+- "rop %x is not supported.\n", rop);
+- } else {
+- if (rop != 0x5 && rop != 0xc) {
+- /* Anything other than plain copies or inverse dest is not
+- * supported */
+- qemu_log_mask(LOG_UNIMP, "sm501: rop mode %x is not "
+- "supported.\n", rop);
+- }
+- }
+- }
+-
+ if ((s->twoD_source_base & 0x08000000) ||
+ (s->twoD_destination_base & 0x08000000)) {
+ printf("%s: only local memory is supported.\n", __func__);
+ abort();
+ }
+
+- switch (operation) {
+- case 0x00: /* copy area */
+-#define COPY_AREA(_bpp, _pixel_type, rtl) { \
+- int y, x, index_d, index_s; \
+- for (y = 0; y < operation_height; y++) { \
+- for (x = 0; x < operation_width; x++) { \
+- _pixel_type val; \
+- \
+- if (rtl) { \
+- index_s = ((src_y - y) * src_width + src_x - x) * _bpp; \
+- index_d = ((dst_y - y) * dst_width + dst_x - x) * _bpp; \
+- } else { \
+- index_s = ((src_y + y) * src_width + src_x + x) * _bpp; \
+- index_d = ((dst_y + y) * dst_width + dst_x + x) * _bpp; \
+- } \
+- if (rop_mode == 1 && rop == 5) { \
+- /* Invert dest */ \
+- val = ~*(_pixel_type *)&dst[index_d]; \
+- } else { \
+- val = *(_pixel_type *)&src[index_s]; \
+- } \
+- *(_pixel_type *)&dst[index_d] = val; \
+- } \
+- } \
++ if (!dst_width) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero dest pitch.\n");
++ return;
+ }
+- switch (format_flags) {
+- case 0:
+- COPY_AREA(1, uint8_t, rtl);
+- break;
+- case 1:
+- COPY_AREA(2, uint16_t, rtl);
+- break;
+- case 2:
+- COPY_AREA(4, uint32_t, rtl);
+- break;
+- }
+- break;
+
+- case 0x01: /* fill rectangle */
+-#define FILL_RECT(_bpp, _pixel_type) { \
+- int y, x; \
+- for (y = 0; y < operation_height; y++) { \
+- for (x = 0; x < operation_width; x++) { \
+- int index = ((dst_y + y) * dst_width + dst_x + x) * _bpp; \
+- *(_pixel_type *)&dst[index] = (_pixel_type)color; \
+- } \
+- } \
++ if (!operation_width || !operation_height) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero size 2D op.\n");
++ return;
+ }
+
+- switch (format_flags) {
+- case 0:
+- FILL_RECT(1, uint8_t);
+- break;
+- case 1:
+- color = cpu_to_le16(color);
+- FILL_RECT(2, uint16_t);
+- break;
+- case 2:
++ if (rtl) {
++ dst_x -= operation_width - 1;
++ dst_y -= operation_height - 1;
++ }
++
++ if (dst_base >= get_local_mem_size(s) || dst_base +
++ (dst_x + operation_width + (dst_y + operation_height) * (dst_width + operation_width)) *
++ (1 << format_flags) >= get_local_mem_size(s)) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: 2D op dest is outside vram.\n");
++ return;
++ }
++
++ switch (operation) {
++ case 0: /* BitBlt */
++ if (!src_width) {
++ qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero src pitch.\n");
++ return;
++ }
++
++ if (rtl) {
++ src_x -= operation_width - 1;
++ src_y -= operation_height - 1;
++ }
++
++ if (src_base >= get_local_mem_size(s) || src_base +
++ (src_x + operation_width + (src_y + operation_height) * (src_width + operation_width)) *
++ (1 << format_flags) >= get_local_mem_size(s)) {
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "sm501: 2D op src is outside vram.\n");
++ return;
++ }
++
++ if ((rop_mode && rop == 0x5) || (!rop_mode && rop == 0x55)) {
++ /* Invert dest, is there a way to do this with pixman? */
++ unsigned int x, y, i;
++ uint8_t *d = s->local_mem + dst_base;
++
++ for (y = 0; y < operation_height; y++) {
++ i = (dst_x + (dst_y + y) * dst_width) * (1 << format_flags);
++ for (x = 0; x < operation_width; x++, i += (1 << format_flags)) {
++ switch (format_flags) {
++ case 0:
++ d[i] = ~d[i];
++ break;
++ case 1:
++ *(uint16_t *)&d[i] = ~*(uint16_t *)&d[i];
++ break;
++ case 2:
++ *(uint32_t *)&d[i] = ~*(uint32_t *)&d[i];
++ break;
++ }
++ }
++ }
++ } else {
++ /* Do copy src for unimplemented ops, better than unpainted area */
++ if ((rop_mode && (rop != 0xc || rop2_source_is_pattern)) ||
++ (!rop_mode && rop != 0xcc)) {
++ qemu_log_mask(LOG_UNIMP,
++ "sm501: rop%d op %x%s not implemented\n",
++ (rop_mode ? 2 : 3), rop,
++ (rop2_source_is_pattern ?
++ " with pattern source" : ""));
++ }
++ /* Check for overlaps, this could be made more exact */
++ uint32_t sb, se, db, de;
++ sb = src_base + src_x + src_y * (operation_width + src_width);
++ se = sb + operation_width + operation_height * (operation_width + src_width);
++ db = dst_base + dst_x + dst_y * (operation_width + dst_width);
++ de = db + operation_width + operation_height * (operation_width + dst_width);
++ if (rtl && ((db >= sb && db <= se) || (de >= sb && de <= se))) {
++ /* regions may overlap: copy via temporary */
++ int llb = operation_width * (1 << format_flags);
++ int tmp_stride = DIV_ROUND_UP(llb, sizeof(uint32_t));
++ uint32_t *tmp = g_malloc(tmp_stride * sizeof(uint32_t) *
++ operation_height);
++ pixman_blt((uint32_t *)&s->local_mem[src_base], tmp,
++ src_width * (1 << format_flags) / sizeof(uint32_t),
++ tmp_stride, 8 * (1 << format_flags), 8 * (1 << format_flags),
++ src_x, src_y, 0, 0, operation_width, operation_height);
++ pixman_blt(tmp, (uint32_t *)&s->local_mem[dst_base],
++ tmp_stride,
++ dst_width * (1 << format_flags) / sizeof(uint32_t),
++ 8 * (1 << format_flags), 8 * (1 << format_flags),
++ 0, 0, dst_x, dst_y, operation_width, operation_height);
++ g_free(tmp);
++ } else {
++ pixman_blt((uint32_t *)&s->local_mem[src_base],
++ (uint32_t *)&s->local_mem[dst_base],
++ src_width * (1 << format_flags) / sizeof(uint32_t),
++ dst_width * (1 << format_flags) / sizeof(uint32_t),
++ 8 * (1 << format_flags), 8 * (1 << format_flags),
++ src_x, src_y, dst_x, dst_y, operation_width, operation_height);
++ }
++ }
++ break;
++
++ case 1: /* Rectangle Fill */
++ if (format_flags == 2) {
+ color = cpu_to_le32(color);
+- FILL_RECT(4, uint32_t);
+- break;
++ } else if (format_flags == 1) {
++ color = cpu_to_le16(color);
+ }
++
++ pixman_fill((uint32_t *)&s->local_mem[dst_base],
++ dst_width * (1 << format_flags) / sizeof(uint32_t),
++ 8 * (1 << format_flags), dst_x, dst_y, operation_width, operation_height, color);
+ break;
+
+ default:
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13765.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13765.patch
new file mode 100644
index 0000000000..9014ba0f13
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13765.patch
@@ -0,0 +1,48 @@
+From e423455c4f23a1a828901c78fe6d03b7dde79319 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Wed, 25 Sep 2019 14:16:43 +0200
+Subject: [PATCH] hw/core/loader: Fix possible crash in rom_copy()
+
+Both, "rom->addr" and "addr" are derived from the binary image
+that can be loaded with the "-kernel" paramer. The code in
+rom_copy() then calculates:
+
+ d = dest + (rom->addr - addr);
+
+and uses "d" as destination in a memcpy() some lines later. Now with
+bad kernel images, it is possible that rom->addr is smaller than addr,
+thus "rom->addr - addr" gets negative and the memcpy() then tries to
+copy contents from the image to a bad memory location. This could
+maybe be used to inject code from a kernel image into the QEMU binary,
+so we better fix it with an additional sanity check here.
+
+Cc: qemu-stable@nongnu.org
+Reported-by: Guangming Liu
+Buglink: https://bugs.launchpad.net/qemu/+bug/1844635
+Message-Id: <20190925130331.27825-1-thuth@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=patch;h=e423455c4f23a1a828901c78fe6d03b7dde79319]
+CVE: CVE-2020-13765
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ hw/core/loader.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/core/loader.c b/hw/core/loader.c
+index 0d60219..5099f27 100644
+--- a/hw/core/loader.c
++++ b/hw/core/loader.c
+@@ -1281,7 +1281,7 @@ int rom_copy(uint8_t *dest, hwaddr addr, size_t size)
+ if (rom->addr + rom->romsize < addr) {
+ continue;
+ }
+- if (rom->addr > end) {
++ if (rom->addr > end || rom->addr < addr) {
+ break;
+ }
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-14364.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-14364.patch
new file mode 100644
index 0000000000..a109ac08d6
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-14364.patch
@@ -0,0 +1,93 @@
+From b946434f2659a182afc17e155be6791ebfb302eb Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Tue, 25 Aug 2020 07:36:36 +0200
+Subject: [PATCH] usb: fix setup_len init (CVE-2020-14364)
+
+Store calculated setup_len in a local variable, verify it, and only
+write it to the struct (USBDevice->setup_len) in case it passed the
+sanity checks.
+
+This prevents other code (do_token_{in,out} functions specifically)
+from working with invalid USBDevice->setup_len values and overrunning
+the USBDevice->setup_buf[] buffer.
+
+Fixes: CVE-2020-14364
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Tested-by: Gonglei <arei.gonglei@huawei.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-id: 20200825053636.29648-1-kraxel@redhat.com
+
+Upstream-Status: Backport
+CVE: CVE-2020-14364
+[https://git.qemu.org/?p=qemu.git;a=patch;h=b946434f2659a182afc17e155be6791ebfb302eb]
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ hw/usb/core.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/hw/usb/core.c b/hw/usb/core.c
+index 5abd128..5234dcc 100644
+--- a/hw/usb/core.c
++++ b/hw/usb/core.c
+@@ -129,6 +129,7 @@ void usb_wakeup(USBEndpoint *ep, unsigned int stream)
+ static void do_token_setup(USBDevice *s, USBPacket *p)
+ {
+ int request, value, index;
++ unsigned int setup_len;
+
+ if (p->iov.size != 8) {
+ p->status = USB_RET_STALL;
+@@ -138,14 +139,15 @@ static void do_token_setup(USBDevice *s, USBPacket *p)
+ usb_packet_copy(p, s->setup_buf, p->iov.size);
+ s->setup_index = 0;
+ p->actual_length = 0;
+- s->setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6];
+- if (s->setup_len > sizeof(s->data_buf)) {
++ setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6];
++ if (setup_len > sizeof(s->data_buf)) {
+ fprintf(stderr,
+ "usb_generic_handle_packet: ctrl buffer too small (%d > %zu)\n",
+- s->setup_len, sizeof(s->data_buf));
++ setup_len, sizeof(s->data_buf));
+ p->status = USB_RET_STALL;
+ return;
+ }
++ s->setup_len = setup_len;
+
+ request = (s->setup_buf[0] << 8) | s->setup_buf[1];
+ value = (s->setup_buf[3] << 8) | s->setup_buf[2];
+@@ -259,26 +261,28 @@ static void do_token_out(USBDevice *s, USBPacket *p)
+ static void do_parameter(USBDevice *s, USBPacket *p)
+ {
+ int i, request, value, index;
++ unsigned int setup_len;
+
+ for (i = 0; i < 8; i++) {
+ s->setup_buf[i] = p->parameter >> (i*8);
+ }
+
+ s->setup_state = SETUP_STATE_PARAM;
+- s->setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6];
+ s->setup_index = 0;
+
+ request = (s->setup_buf[0] << 8) | s->setup_buf[1];
+ value = (s->setup_buf[3] << 8) | s->setup_buf[2];
+ index = (s->setup_buf[5] << 8) | s->setup_buf[4];
+
+- if (s->setup_len > sizeof(s->data_buf)) {
++ setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6];
++ if (setup_len > sizeof(s->data_buf)) {
+ fprintf(stderr,
+ "usb_generic_handle_packet: ctrl buffer too small (%d > %zu)\n",
+- s->setup_len, sizeof(s->data_buf));
++ setup_len, sizeof(s->data_buf));
+ p->status = USB_RET_STALL;
+ return;
+ }
++ s->setup_len = setup_len;
+
+ if (p->pid == USB_TOKEN_OUT) {
+ usb_packet_copy(p, s->data_buf, s->setup_len);
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15863.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15863.patch
new file mode 100644
index 0000000000..9927584d11
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15863.patch
@@ -0,0 +1,64 @@
+From 5519724a13664b43e225ca05351c60b4468e4555 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Fri, 10 Jul 2020 11:19:41 +0200
+Subject: [PATCH] hw/net/xgmac: Fix buffer overflow in xgmac_enet_send()
+
+A buffer overflow issue was reported by Mr. Ziming Zhang, CC'd here. It
+occurs while sending an Ethernet frame due to missing break statements
+and improper checking of the buffer size.
+
+Reported-by: Ziming Zhang <ezrakiez@gmail.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+CVE: CVE-2020-15863
+Upstream-Status: Backport
+[https://git.qemu.org/?p=qemu.git;a=commit;h=5519724a13664b43e225ca05351c60b4468e4555]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ hw/net/xgmac.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c
+index f49df95..f496f7e 100644
+--- a/hw/net/xgmac.c
++++ b/hw/net/xgmac.c
+@@ -217,21 +217,31 @@ static void xgmac_enet_send(XgmacState *s)
+ }
+ len = (bd.buffer1_size & 0xfff) + (bd.buffer2_size & 0xfff);
+
++ /*
++ * FIXME: these cases of malformed tx descriptors (bad sizes)
++ * should probably be reported back to the guest somehow
++ * rather than simply silently stopping processing, but we
++ * don't know what the hardware does in this situation.
++ * This will only happen for buggy guests anyway.
++ */
+ if ((bd.buffer1_size & 0xfff) > 2048) {
+ DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... -- "
+ "xgmac buffer 1 len on send > 2048 (0x%x)\n",
+ __func__, bd.buffer1_size & 0xfff);
++ break;
+ }
+ if ((bd.buffer2_size & 0xfff) != 0) {
+ DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... -- "
+ "xgmac buffer 2 len on send != 0 (0x%x)\n",
+ __func__, bd.buffer2_size & 0xfff);
++ break;
+ }
+- if (len >= sizeof(frame)) {
++ if (frame_size + len >= sizeof(frame)) {
+ DEBUGF_BRK("qemu:%s: buffer overflow %d read into %zu "
+- "buffer\n" , __func__, len, sizeof(frame));
++ "buffer\n" , __func__, frame_size + len, sizeof(frame));
+ DEBUGF_BRK("qemu:%s: buffer1.size=%d; buffer2.size=%d\n",
+ __func__, bd.buffer1_size, bd.buffer2_size);
++ break;
+ }
+
+ cpu_physical_memory_read(bd.buffer1_addr, ptr, len);
+--
+1.9.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-16092.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-16092.patch
new file mode 100644
index 0000000000..8ce01e26ad
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-16092.patch
@@ -0,0 +1,49 @@
+From 035e69b063835a5fd23cacabd63690a3d84532a8 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Sat, 1 Aug 2020 18:42:38 +0200
+Subject: [PATCH] hw/net/net_tx_pkt: fix assertion failure in
+ net_tx_pkt_add_raw_fragment()
+
+An assertion failure issue was found in the code that processes network
+packets while adding data fragments into the packet context. It could be
+abused by a malicious guest to abort the QEMU process on the host. This
+patch replaces the affected assert() with a conditional statement,
+returning false if the current data fragment exceeds max_raw_frags.
+
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Ziming Zhang <ezrakiez@gmail.com>
+Reviewed-by: Dmitry Fleytman <dmitry.fleytman@gmail.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-16092
+[https://git.qemu.org/?p=qemu.git;a=commit;h=035e69b063835a5fd23cacabd63690a3d84532a8]
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ hw/net/net_tx_pkt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
+index 162f802..54d4c3b 100644
+--- a/hw/net/net_tx_pkt.c
++++ b/hw/net/net_tx_pkt.c
+@@ -379,7 +379,10 @@ bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
+ hwaddr mapped_len = 0;
+ struct iovec *ventry;
+ assert(pkt);
+- assert(pkt->max_raw_frags > pkt->raw_frags);
++
++ if (pkt->raw_frags >= pkt->max_raw_frags) {
++ return false;
++ }
+
+ if (!len) {
+ return true;
+--
+2.17.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-1711.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-1711.patch
new file mode 100644
index 0000000000..aa7bc82329
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-1711.patch
@@ -0,0 +1,64 @@
+From 693fd2acdf14dd86c0bf852610f1c2cca80a74dc Mon Sep 17 00:00:00 2001
+From: Felipe Franciosi <felipe@nutanix.com>
+Date: Thu, 23 Jan 2020 12:44:59 +0000
+Subject: [PATCH] iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711)
+
+When querying an iSCSI server for the provisioning status of blocks (via
+GET LBA STATUS), Qemu only validates that the response descriptor zero's
+LBA matches the one requested. Given the SCSI spec allows servers to
+respond with the status of blocks beyond the end of the LUN, Qemu may
+have its heap corrupted by clearing/setting too many bits at the end of
+its allocmap for the LUN.
+
+A malicious guest in control of the iSCSI server could carefully program
+Qemu's heap (by selectively setting the bitmap) and then smash it.
+
+This limits the number of bits that iscsi_co_block_status() will try to
+update in the allocmap so it can't overflow the bitmap.
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=patch;h=693fd2acdf14dd86c0bf852610f1c2cca80a74dc]
+CVE: CVE-2020-1711
+
+Fixes: CVE-2020-1711
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Felipe Franciosi <felipe@nutanix.com>
+Signed-off-by: Peter Turschmid <peter.turschm@nutanix.com>
+Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
+Signed-off-by: Kevin Wolf <kwolf@redhat.com>
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ block/iscsi.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/block/iscsi.c b/block/iscsi.c
+index 2aea7e3..cbd5729 100644
+--- a/block/iscsi.c
++++ b/block/iscsi.c
+@@ -701,7 +701,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs,
+ struct scsi_get_lba_status *lbas = NULL;
+ struct scsi_lba_status_descriptor *lbasd = NULL;
+ struct IscsiTask iTask;
+- uint64_t lba;
++ uint64_t lba, max_bytes;
+ int ret;
+
+ iscsi_co_init_iscsitask(iscsilun, &iTask);
+@@ -721,6 +721,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs,
+ }
+
+ lba = offset / iscsilun->block_size;
++ max_bytes = (iscsilun->num_blocks - lba) * iscsilun->block_size;
+
+ qemu_mutex_lock(&iscsilun->mutex);
+ retry:
+@@ -764,7 +765,7 @@ retry:
+ goto out_unlock;
+ }
+
+- *pnum = (int64_t) lbasd->num_blocks * iscsilun->block_size;
++ *pnum = MIN((int64_t) lbasd->num_blocks * iscsilun->block_size, max_bytes);
+
+ if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
+ lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
+--
+1.8.3.1
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-1.patch
new file mode 100644
index 0000000000..df6bca6db6
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-1.patch
@@ -0,0 +1,44 @@
+From b2663d527a1992ba98c0266458b21ada3b9d0d2e Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Thu, 27 Feb 2020 12:07:35 +0800
+Subject: [PATCH] tcp_emu: Fix oob access
+
+The main loop only checks for one available byte, while we sometimes
+need two bytes.
+
+CVE: CVE-2020-7039
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/slirp/libslirp/commit/2655fffed7a9e765bcb4701dd876e9dab975f289]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ slirp/src/tcp_subr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/slirp/src/tcp_subr.c b/slirp/src/tcp_subr.c
+index d6dd133..4bea2d4 100644
+--- a/slirp/src/tcp_subr.c
++++ b/slirp/src/tcp_subr.c
+@@ -886,6 +886,8 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ break;
+
+ case 5:
++ if (bptr == m->m_data + m->m_len - 1)
++ return 1; /* We need two bytes */
+ /*
+ * The difference between versions 1.0 and
+ * 2.0 is here. For future versions of
+@@ -901,6 +903,10 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ /* This is the field containing the port
+ * number that RA-player is listening to.
+ */
++
++ if (bptr == m->m_data + m->m_len - 1)
++ return 1; /* We need two bytes */
++
+ lport = (((uint8_t *)bptr)[0] << 8) + ((uint8_t *)bptr)[1];
+ if (lport < 6970)
+ lport += 256; /* don't know why */
+--
+2.7.4
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-2.patch
new file mode 100644
index 0000000000..4a00fa2afd
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-2.patch
@@ -0,0 +1,59 @@
+From 8f67e76e4148e37f3d8d2bcbdee7417fdedb7669 Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Thu, 27 Feb 2020 12:10:34 +0800
+Subject: [PATCH] slirp: use correct size while emulating commands
+
+While emulating services in tcp_emu(), it uses 'mbuf' size
+'m->m_size' to write commands via snprintf(3). Use M_FREEROOM(m)
+size to avoid possible OOB access.
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Samuel Thibault
+<samuel.thibault@ens-lyon.org>
+Message-Id: <20200109094228.79764-3-ppandit@redhat.com>
+
+CVE: CVE-2020-7039
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/slirp/libslirp/commit/82ebe9c370a0e2970fb5695aa19aa5214a6a1c80]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ slirp/src/tcp_subr.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/slirp/src/tcp_subr.c b/slirp/src/tcp_subr.c
+index 4bea2d4..e8ed4ef 100644
+--- a/slirp/src/tcp_subr.c
++++ b/slirp/src/tcp_subr.c
+@@ -696,7 +696,7 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ n4 = (laddr & 0xff);
+
+ m->m_len = bptr - m->m_data; /* Adjust length */
+- m->m_len += snprintf(bptr, m->m_size - m->m_len,
++ m->m_len += snprintf(bptr, M_FREEROOM(m),
+ "ORT %d,%d,%d,%d,%d,%d\r\n%s", n1, n2, n3, n4,
+ n5, n6, x == 7 ? buff : "");
+ return 1;
+@@ -731,8 +731,7 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ n4 = (laddr & 0xff);
+
+ m->m_len = bptr - m->m_data; /* Adjust length */
+- m->m_len +=
+- snprintf(bptr, m->m_size - m->m_len,
++ m->m_len += snprintf(bptr, M_FREEROOM(m),
+ "27 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n%s",
+ n1, n2, n3, n4, n5, n6, x == 7 ? buff : "");
+
+@@ -758,8 +757,8 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ if (m->m_data[m->m_len - 1] == '\0' && lport != 0 &&
+ (so = tcp_listen(slirp, INADDR_ANY, 0, so->so_laddr.s_addr,
+ htons(lport), SS_FACCEPTONCE)) != NULL)
+- m->m_len =
+- snprintf(m->m_data, m->m_size, "%d", ntohs(so->so_fport)) + 1;
++ m->m_len = snprintf(m->m_data, M_ROOM(m),
++ "%d", ntohs(so->so_fport)) + 1;
+ return 1;
+
+ case EMU_IRC:
+--
+2.7.4
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-3.patch
new file mode 100644
index 0000000000..70ce480d80
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-7039-3.patch
@@ -0,0 +1,64 @@
+From 0b03959b72036afce151783720d9e54988cf76ef Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Thu, 27 Feb 2020 12:15:04 +0800
+Subject: [PATCH] slirp: use correct size while emulating IRC commands
+
+While emulating IRC DCC commands, tcp_emu() uses 'mbuf' size
+'m->m_size' to write DCC commands via snprintf(3). This may
+lead to OOB write access, because 'bptr' points somewhere in
+the middle of 'mbuf' buffer, not at the start. Use M_FREEROOM(m)
+size to avoid OOB access.
+Reported-by: Vishnu Dev TJ <vishnudevtj@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Samuel Thibault
+<samuel.thibault@ens-lyon.org>
+Message-Id: <20200109094228.79764-2-ppandit@redhat.com>
+
+CVE: CVE-2020-7039
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/slirp/libslirp/commit/ce131029d6d4a405cb7d3ac6716d03e58fb4a5d9]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ slirp/src/tcp_subr.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/slirp/src/tcp_subr.c b/slirp/src/tcp_subr.c
+index e8ed4ef..3a4a8ee 100644
+--- a/slirp/src/tcp_subr.c
++++ b/slirp/src/tcp_subr.c
+@@ -777,7 +777,8 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ return 1;
+ }
+ m->m_len = bptr - m->m_data; /* Adjust length */
+- m->m_len += snprintf(bptr, m->m_size, "DCC CHAT chat %lu %u%c\n",
++ m->m_len += snprintf(bptr, M_FREEROOM(m),
++ "DCC CHAT chat %lu %u%c\n",
+ (unsigned long)ntohl(so->so_faddr.s_addr),
+ ntohs(so->so_fport), 1);
+ } else if (sscanf(bptr, "DCC SEND %256s %u %u %u", buff, &laddr, &lport,
+@@ -787,8 +788,8 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ return 1;
+ }
+ m->m_len = bptr - m->m_data; /* Adjust length */
+- m->m_len +=
+- snprintf(bptr, m->m_size, "DCC SEND %s %lu %u %u%c\n", buff,
++ m->m_len += snprintf(bptr, M_FREEROOM(m),
++ "DCC SEND %s %lu %u %u%c\n", buff,
+ (unsigned long)ntohl(so->so_faddr.s_addr),
+ ntohs(so->so_fport), n1, 1);
+ } else if (sscanf(bptr, "DCC MOVE %256s %u %u %u", buff, &laddr, &lport,
+@@ -798,8 +799,8 @@ int tcp_emu(struct socket *so, struct mbuf *m)
+ return 1;
+ }
+ m->m_len = bptr - m->m_data; /* Adjust length */
+- m->m_len +=
+- snprintf(bptr, m->m_size, "DCC MOVE %s %lu %u %u%c\n", buff,
++ m->m_len += snprintf(bptr, M_FREEROOM(m),
++ "DCC MOVE %s %lu %u %u%c\n", buff,
+ (unsigned long)ntohl(so->so_faddr.s_addr),
+ ntohs(so->so_fport), n1, 1);
+ }
+--
+2.7.4
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-7211.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-7211.patch
new file mode 100644
index 0000000000..11be4c92e7
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-7211.patch
@@ -0,0 +1,46 @@
+From 14ec36e107a8c9af7d0a80c3571fe39b291ff1d4 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Mon, 13 Jan 2020 17:44:31 +0530
+Subject: [PATCH] slirp: tftp: restrict relative path access
+
+tftp restricts relative or directory path access on Linux systems.
+Apply the same restrictions on Windows systems too. This helps to avoid
+a directory traversal issue.
+
+Fixes: https://bugs.launchpad.net/qemu/+bug/1812451
+Reported-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Message-Id: <20200113121431.156708-1-ppandit@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/slirp/libslirp/-/commit/14ec36e107a8c9af7d0a80c3571fe39b291ff1d4.patch]
+CVE: CVE-2020-7211
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ slirp/src/tftp.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/slirp/src/tftp.c b/slirp/src/tftp.c
+index 093c2e0..e52e71b 100644
+--- a/slirp/src/tftp.c
++++ b/slirp/src/tftp.c
+@@ -344,8 +344,13 @@ static void tftp_handle_rrq(Slirp *slirp, struct sockaddr_storage *srcsas,
+ k += 6; /* skipping octet */
+
+ /* do sanity checks on the filename */
+- if (!strncmp(req_fname, "../", 3) ||
+- req_fname[strlen(req_fname) - 1] == '/' || strstr(req_fname, "/../")) {
++ if (
++#ifdef G_OS_WIN32
++ strstr(req_fname, "..\\") ||
++ req_fname[strlen(req_fname) - 1] == '\\' ||
++#endif
++ strstr(req_fname, "../") ||
++ req_fname[strlen(req_fname) - 1] == '/') {
+ tftp_send_error(spt, 2, "Access violation", tp);
+ return;
+ }
+--
+2.24.1
+
diff --git a/meta/recipes-devtools/rsync/rsync_3.1.3.bb b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
index ffb1d061c0..152ff02a25 100644
--- a/meta/recipes-devtools/rsync/rsync_3.1.3.bb
+++ b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
@@ -20,6 +20,9 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \
SRC_URI[md5sum] = "1581a588fde9d89f6bc6201e8129afaf"
SRC_URI[sha256sum] = "55cc554efec5fdaad70de921cd5a5eeb6c29a95524c715f3bbf849235b0800c0"
+# The fix for CVE-2017-16548 was only needed for v3.1.3pre1; it is already in v3.1.3.
+CVE_CHECK_WHITELIST += " CVE-2017-16548 "
+
inherit autotools
PACKAGECONFIG ??= "acl attr \
diff --git a/meta/recipes-devtools/ruby/ruby/fix-CVE-2019-16254.patch b/meta/recipes-devtools/ruby/ruby/fix-CVE-2019-16254.patch
new file mode 100644
index 0000000000..704c850c50
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/fix-CVE-2019-16254.patch
@@ -0,0 +1,106 @@
+From 18d5289b4579822e391b3f5c16541e6552e9f06c Mon Sep 17 00:00:00 2001
+From: Yusuke Endoh <mame@ruby-lang.org>
+Date: Tue, 1 Oct 2019 12:29:18 +0900
+Subject: [PATCH] WEBrick: prevent response splitting and header injection
+
+This is a follow up to d9d4a28f1cdd05a0e8dabb36d747d40bbcc30f16.
+The commit prevented CRLR, but did not address an isolated CR or an
+isolated LF.
+
+Upstream-Status: Backport https://github.com/ruby/ruby/commit/3ce238b5f9795581eb84114dcfbdf4aa086bfecc
+CVE: CVE-2019-16254
+
+Co-Authored-By: NARUSE, Yui <naruse@airemix.jp>
+Signed-off-by: Rahul Chauhan <rahulchauhankitps@gmail.com>
+---
+ lib/webrick/httpresponse.rb | 3 ++-
+ test/webrick/test_httpresponse.rb | 46 +++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 46 insertions(+), 3 deletions(-)
+
+diff --git a/lib/webrick/httpresponse.rb b/lib/webrick/httpresponse.rb
+index 6d77692..d26324c 100644
+--- a/lib/webrick/httpresponse.rb
++++ b/lib/webrick/httpresponse.rb
+@@ -367,7 +367,8 @@ def set_error(ex, backtrace=false)
+ private
+
+ def check_header(header_value)
+- if header_value =~ /\r\n/
++ header_value = header_value.to_s
++ if /[\r\n]/ =~ header_value
+ raise InvalidHeader
+ else
+ header_value
+diff --git a/test/webrick/test_httpresponse.rb b/test/webrick/test_httpresponse.rb
+index 6263e0a..24a6968 100644
+--- a/test/webrick/test_httpresponse.rb
++++ b/test/webrick/test_httpresponse.rb
+@@ -29,7 +29,7 @@ def setup
+ @res.keep_alive = true
+ end
+
+- def test_prevent_response_splitting_headers
++ def test_prevent_response_splitting_headers_crlf
+ res['X-header'] = "malicious\r\nCookie: hack"
+ io = StringIO.new
+ res.send_response io
+@@ -39,7 +39,7 @@ def test_prevent_response_splitting_headers
+ refute_match 'hack', io.string
+ end
+
+- def test_prevent_response_splitting_cookie_headers
++ def test_prevent_response_splitting_cookie_headers_crlf
+ user_input = "malicious\r\nCookie: hack"
+ res.cookies << WEBrick::Cookie.new('author', user_input)
+ io = StringIO.new
+@@ -50,6 +50,48 @@ def test_prevent_response_splitting_cookie_headers
+ refute_match 'hack', io.string
+ end
+
++ def test_prevent_response_splitting_headers_cr
++ res['X-header'] = "malicious\rCookie: hack"
++ io = StringIO.new
++ res.send_response io
++ io.rewind
++ res = Net::HTTPResponse.read_new(Net::BufferedIO.new(io))
++ assert_equal '500', res.code
++ refute_match 'hack', io.string
++ end
++
++ def test_prevent_response_splitting_cookie_headers_cr
++ user_input = "malicious\rCookie: hack"
++ res.cookies << WEBrick::Cookie.new('author', user_input)
++ io = StringIO.new
++ res.send_response io
++ io.rewind
++ res = Net::HTTPResponse.read_new(Net::BufferedIO.new(io))
++ assert_equal '500', res.code
++ refute_match 'hack', io.string
++ end
++
++ def test_prevent_response_splitting_headers_lf
++ res['X-header'] = "malicious\nCookie: hack"
++ io = StringIO.new
++ res.send_response io
++ io.rewind
++ res = Net::HTTPResponse.read_new(Net::BufferedIO.new(io))
++ assert_equal '500', res.code
++ refute_match 'hack', io.string
++ end
++
++ def test_prevent_response_splitting_cookie_headers_lf
++ user_input = "malicious\nCookie: hack"
++ res.cookies << WEBrick::Cookie.new('author', user_input)
++ io = StringIO.new
++ res.send_response io
++ io.rewind
++ res = Net::HTTPResponse.read_new(Net::BufferedIO.new(io))
++ assert_equal '500', res.code
++ refute_match 'hack', io.string
++ end
++
+ def test_304_does_not_log_warning
+ res.status = 304
+ res.setup_header
+--
+2.7.4
diff --git a/meta/recipes-devtools/ruby/ruby_2.5.5.bb b/meta/recipes-devtools/ruby/ruby_2.5.5.bb
index 223b0371eb..58bb97f4bd 100644
--- a/meta/recipes-devtools/ruby/ruby_2.5.5.bb
+++ b/meta/recipes-devtools/ruby/ruby_2.5.5.bb
@@ -3,6 +3,7 @@ require ruby.inc
SRC_URI += " \
file://0001-configure.ac-check-finite-isinf-isnan-as-macros-firs.patch \
file://run-ptest \
+ file://fix-CVE-2019-16254.patch \
"
SRC_URI[md5sum] = "7e156fb526b8f4bb1b30a3dd8a7ce400"
diff --git a/meta/recipes-devtools/strace/strace/Makefile-ptest.patch b/meta/recipes-devtools/strace/strace/Makefile-ptest.patch
index 08fa5c53b8..36e93a2dcf 100644
--- a/meta/recipes-devtools/strace/strace/Makefile-ptest.patch
+++ b/meta/recipes-devtools/strace/strace/Makefile-ptest.patch
@@ -44,6 +44,6 @@ index 825c989..4623c48 100644
+ done
+ for file in $(EXTRA_DIST); do \
+ install $(srcdir)/$$file $(DESTDIR)/$(TESTDIR); \
-+ sed -i -e 's/$${srcdir=.}/./g' $(DESTDIR)/$(TESTDIR)/$$file; \
++ #sed -i -e 's/$${srcdir=.}/./g' $(DESTDIR)/$(TESTDIR)/$$file; \
+ done
+ for i in net scm_rights-fd rt_sigaction; do sed -i -e 's/$$srcdir/./g' $(DESTDIR)/$(TESTDIR)/$$i.test; done
diff --git a/meta/recipes-devtools/strace/strace/run-ptest b/meta/recipes-devtools/strace/strace/run-ptest
index 2fed984e90..4660207220 100755
--- a/meta/recipes-devtools/strace/strace/run-ptest
+++ b/meta/recipes-devtools/strace/strace/run-ptest
@@ -1,3 +1,6 @@
#!/bin/sh
-export TIMEOUT_DURATION=30
-make -B -C tests -k test-suite.log
+export TIMEOUT_DURATION=120
+chown nobody tests
+chown nobody tests/*
+chown nobody ../ptest
+su nobody -c "make -B -C tests -k test-suite.log"
diff --git a/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch b/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch
new file mode 100644
index 0000000000..5d587fc832
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.12/CVE-2019-14866.patch
@@ -0,0 +1,316 @@
+CVE: CVE-2019-14866
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/cpio.git/commit/?id=7554e3e42cd72f6f8304410c47fe6f8918e9bfd7]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+
+From a052401293e45a13cded5959b258204dae6d0af5 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sun, 3 Nov 2019 23:59:39 +0200
+Subject: [PATCH] Fix CVE-2019-14866
+
+* src/copyout.c (to_ascii): Additional argument nul controls whether
+to add the terminating nul character.
+(field_width_error): Improve diagnostics: print the actual and the
+maximum allowed field value.
+* src/extern.h (to_ascii, field_width_error): New prototypes.
+* src/tar.c (to_oct): Remove.
+(to_oct_or_error): New function.
+(TO_OCT): New macro.
+(write_out_tar_header): Use TO_OCT and to_ascii. Return 0 on
+success, 1 on error.
+---
+ src/copyout.c | 49 ++++++++++++++++++++++--------------
+ src/extern.h | 15 +++++++++--
+ src/tar.c | 69 ++++++++++++++++++++++++---------------------------
+ 3 files changed, 75 insertions(+), 58 deletions(-)
+
+diff --git a/src/copyout.c b/src/copyout.c
+index 1f0987a..1ae5477 100644
+--- a/src/copyout.c
++++ b/src/copyout.c
+@@ -269,26 +269,32 @@ writeout_final_defers (int out_des)
+ so it should be moved to paxutils too.
+ Allowed values for logbase are: 1 (binary), 2, 3 (octal), 4 (hex) */
+ int
+-to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase)
++to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase, bool nul)
+ {
+ static char codetab[] = "0123456789ABCDEF";
+- int i = digits;
+-
+- do
++
++ if (nul)
++ where[--digits] = 0;
++ while (digits > 0)
+ {
+- where[--i] = codetab[(v & ((1 << logbase) - 1))];
++ where[--digits] = codetab[(v & ((1 << logbase) - 1))];
+ v >>= logbase;
+ }
+- while (i);
+
+ return v != 0;
+ }
+
+-static void
+-field_width_error (const char *filename, const char *fieldname)
++void
++field_width_error (const char *filename, const char *fieldname,
++ uintmax_t value, size_t width, bool nul)
+ {
+- error (0, 0, _("%s: field width not sufficient for storing %s"),
+- filename, fieldname);
++ char valbuf[UINTMAX_STRSIZE_BOUND + 1];
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%s"),
++ filename, fieldname,
++ STRINGIFY_BIGINT (value, valbuf),
++ STRINGIFY_BIGINT (MAX_VAL_WITH_DIGITS (width - nul, LG_8),
++ maxbuf));
+ }
+
+ static void
+@@ -303,7 +309,7 @@ to_ascii_or_warn (char *where, uintmax_t n, size_t digits,
+ unsigned logbase,
+ const char *filename, const char *fieldname)
+ {
+- if (to_ascii (where, n, digits, logbase))
++ if (to_ascii (where, n, digits, logbase, false))
+ field_width_warning (filename, fieldname);
+ }
+
+@@ -312,9 +318,9 @@ to_ascii_or_error (char *where, uintmax_t n, size_t digits,
+ unsigned logbase,
+ const char *filename, const char *fieldname)
+ {
+- if (to_ascii (where, n, digits, logbase))
++ if (to_ascii (where, n, digits, logbase, false))
+ {
+- field_width_error (filename, fieldname);
++ field_width_error (filename, fieldname, n, digits, false);
+ return 1;
+ }
+ return 0;
+@@ -371,7 +377,7 @@ write_out_new_ascii_header (const char *magic_string,
+ _("name size")))
+ return 1;
+ p += 8;
+- to_ascii (p, file_hdr->c_chksum & 0xffffffff, 8, LG_16);
++ to_ascii (p, file_hdr->c_chksum & 0xffffffff, 8, LG_16, false);
+
+ tape_buffered_write (ascii_header, out_des, sizeof ascii_header);
+
+@@ -388,7 +394,7 @@ write_out_old_ascii_header (dev_t dev, dev_t rdev,
+ char ascii_header[76];
+ char *p = ascii_header;
+
+- to_ascii (p, file_hdr->c_magic, 6, LG_8);
++ to_ascii (p, file_hdr->c_magic, 6, LG_8, false);
+ p += 6;
+ to_ascii_or_warn (p, dev, 6, LG_8, file_hdr->c_name, _("device number"));
+ p += 6;
+@@ -492,7 +498,10 @@ write_out_binary_header (dev_t rdev,
+ short_hdr.c_namesize = file_hdr->c_namesize & 0xFFFF;
+ if (short_hdr.c_namesize != file_hdr->c_namesize)
+ {
+- field_width_error (file_hdr->c_name, _("name size"));
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%u"),
++ file_hdr->c_name, _("name size"),
++ STRINGIFY_BIGINT (file_hdr->c_namesize, maxbuf), 0xFFFFu);
+ return 1;
+ }
+
+@@ -502,7 +511,10 @@ write_out_binary_header (dev_t rdev,
+ if (((off_t)short_hdr.c_filesizes[0] << 16) + short_hdr.c_filesizes[1]
+ != file_hdr->c_filesize)
+ {
+- field_width_error (file_hdr->c_name, _("file size"));
++ char maxbuf[UINTMAX_STRSIZE_BOUND + 1];
++ error (0, 0, _("%s: value %s %s out of allowed range 0..%lu"),
++ file_hdr->c_name, _("file size"),
++ STRINGIFY_BIGINT (file_hdr->c_namesize, maxbuf), 0xFFFFFFFFlu);
+ return 1;
+ }
+
+@@ -552,8 +564,7 @@ write_out_header (struct cpio_file_stat *file_hdr, int out_des)
+ error (0, 0, _("%s: file name too long"), file_hdr->c_name);
+ return 1;
+ }
+- write_out_tar_header (file_hdr, out_des); /* FIXME: No error checking */
+- return 0;
++ return write_out_tar_header (file_hdr, out_des);
+
+ case arf_binary:
+ return write_out_binary_header (makedev (file_hdr->c_rdev_maj,
+diff --git a/src/extern.h b/src/extern.h
+index e27d662..f9ef56a 100644
+--- a/src/extern.h
++++ b/src/extern.h
+@@ -117,6 +117,10 @@ void print_name_with_quoting (char *p);
+ /* copyout.c */
+ int write_out_header (struct cpio_file_stat *file_hdr, int out_des);
+ void process_copy_out (void);
++int to_ascii (char *where, uintmax_t v, size_t digits, unsigned logbase,
++ bool nul);
++void field_width_error (const char *filename, const char *fieldname,
++ uintmax_t value, size_t width, bool nul);
+
+ /* copypass.c */
+ void process_copy_pass (void);
+@@ -145,7 +149,7 @@ int make_path (char *argpath, uid_t owner, gid_t group,
+ const char *verbose_fmt_string);
+
+ /* tar.c */
+-void write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des);
++int write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des);
+ int null_block (long *block, int size);
+ void read_in_tar_header (struct cpio_file_stat *file_hdr, int in_des);
+ int otoa (char *s, unsigned long *n);
+@@ -204,9 +208,16 @@ void cpio_safer_name_suffix (char *name, bool link_target,
+ int cpio_create_dir (struct cpio_file_stat *file_hdr, int existing_dir);
+ void change_dir (void);
+
+-/* FIXME: These two defines should be defined in paxutils */
++/* FIXME: The following three should be defined in paxutils */
+ #define LG_8 3
+ #define LG_16 4
++/* The maximum uintmax_t value that can be represented with DIGITS digits,
++ assuming that each digit is BITS_PER_DIGIT wide. */
++#define MAX_VAL_WITH_DIGITS(digits, bits_per_digit) \
++ ((digits) * (bits_per_digit) < sizeof (uintmax_t) * CHAR_BIT \
++ ? ((uintmax_t) 1 << ((digits) * (bits_per_digit))) - 1 \
++ : (uintmax_t) -1)
++
+
+ uintmax_t from_ascii (char const *where, size_t digs, unsigned logbase);
+
+diff --git a/src/tar.c b/src/tar.c
+index a2ce171..ef58027 100644
+--- a/src/tar.c
++++ b/src/tar.c
+@@ -79,36 +79,17 @@ stash_tar_filename (char *prefix, char *filename)
+ return hold_tar_filename;
+ }
+
+-/* Convert a number into a string of octal digits.
+- Convert long VALUE into a DIGITS-digit field at WHERE,
+- including a trailing space and room for a NUL. DIGITS==3 means
+- 1 digit, a space, and room for a NUL.
+-
+- We assume the trailing NUL is already there and don't fill it in.
+- This fact is used by start_header and finish_header, so don't change it!
+-
+- This is be equivalent to:
+- sprintf (where, "%*lo ", digits - 2, value);
+- except that sprintf fills in the trailing NUL and we don't. */
+-
+-static void
+-to_oct (register long value, register int digits, register char *where)
++static int
++to_oct_or_error (uintmax_t value, size_t digits, char *where, char const *field,
++ char const *file)
+ {
+- --digits; /* Leave the trailing NUL slot alone. */
+-
+- /* Produce the digits -- at least one. */
+- do
++ if (to_ascii (where, value, digits, LG_8, true))
+ {
+- where[--digits] = '0' + (char) (value & 7); /* One octal digit. */
+- value >>= 3;
++ field_width_error (file, field, value, digits, true);
++ return 1;
+ }
+- while (digits > 0 && value != 0);
+-
+- /* Add leading zeroes, if necessary. */
+- while (digits > 0)
+- where[--digits] = '0';
++ return 0;
+ }
+-
+
+
+ /* Compute and return a checksum for TAR_HDR,
+@@ -134,10 +115,22 @@ tar_checksum (struct tar_header *tar_hdr)
+ return sum;
+ }
+
++#define TO_OCT(file_hdr, c_fld, digits, tar_hdr, tar_field) \
++ do \
++ { \
++ if (to_oct_or_error (file_hdr -> c_fld, \
++ digits, \
++ tar_hdr -> tar_field, \
++ #tar_field, \
++ file_hdr->c_name)) \
++ return 1; \
++ } \
++ while (0)
++
+ /* Write out header FILE_HDR, including the file name, to file
+ descriptor OUT_DES. */
+
+-void
++int
+ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ {
+ int name_len;
+@@ -166,11 +159,11 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+
+ /* Ustar standard (POSIX.1-1988) requires the mode to contain only 3 octal
+ digits */
+- to_oct (file_hdr->c_mode & MODE_ALL, 8, tar_hdr->mode);
+- to_oct (file_hdr->c_uid, 8, tar_hdr->uid);
+- to_oct (file_hdr->c_gid, 8, tar_hdr->gid);
+- to_oct (file_hdr->c_filesize, 12, tar_hdr->size);
+- to_oct (file_hdr->c_mtime, 12, tar_hdr->mtime);
++ TO_OCT (file_hdr, c_mode & MODE_ALL, 8, tar_hdr, mode);
++ TO_OCT (file_hdr, c_uid, 8, tar_hdr, uid);
++ TO_OCT (file_hdr, c_gid, 8, tar_hdr, gid);
++ TO_OCT (file_hdr, c_filesize, 12, tar_hdr, size);
++ TO_OCT (file_hdr, c_mtime, 12, tar_hdr, mtime);
+
+ switch (file_hdr->c_mode & CP_IFMT)
+ {
+@@ -182,7 +175,7 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ strncpy (tar_hdr->linkname, file_hdr->c_tar_linkname,
+ TARLINKNAMESIZE);
+ tar_hdr->typeflag = LNKTYPE;
+- to_oct (0, 12, tar_hdr->size);
++ to_ascii (tar_hdr->size, 0, 12, LG_8, true);
+ }
+ else
+ tar_hdr->typeflag = REGTYPE;
+@@ -208,7 +201,7 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ than TARLINKNAMESIZE. */
+ strncpy (tar_hdr->linkname, file_hdr->c_tar_linkname,
+ TARLINKNAMESIZE);
+- to_oct (0, 12, tar_hdr->size);
++ to_ascii (tar_hdr->size, 0, 12, LG_8, true);
+ break;
+ #endif /* CP_IFLNK */
+ }
+@@ -227,13 +220,15 @@ write_out_tar_header (struct cpio_file_stat *file_hdr, int out_des)
+ if (name)
+ strcpy (tar_hdr->gname, name);
+
+- to_oct (file_hdr->c_rdev_maj, 8, tar_hdr->devmajor);
+- to_oct (file_hdr->c_rdev_min, 8, tar_hdr->devminor);
++ TO_OCT (file_hdr, c_rdev_maj, 8, tar_hdr, devmajor);
++ TO_OCT (file_hdr, c_rdev_min, 8, tar_hdr, devminor);
+ }
+
+- to_oct (tar_checksum (tar_hdr), 8, tar_hdr->chksum);
++ to_ascii (tar_hdr->chksum, tar_checksum (tar_hdr), 8, LG_8, true);
+
+ tape_buffered_write ((char *) &tar_rec, out_des, TARRECORDSIZE);
++
++ return 0;
+ }
+
+ /* Return nonzero iff all the bytes in BLOCK are NUL.
+--
+2.24.1
+
diff --git a/meta/recipes-extended/cpio/cpio_2.12.bb b/meta/recipes-extended/cpio/cpio_2.12.bb
index 3713bf0b1f..5abe494ebc 100644
--- a/meta/recipes-extended/cpio/cpio_2.12.bb
+++ b/meta/recipes-extended/cpio/cpio_2.12.bb
@@ -11,6 +11,7 @@ SRC_URI = "${GNU_MIRROR}/cpio/cpio-${PV}.tar.gz \
file://0001-Fix-CVE-2015-1197.patch \
file://0001-CVE-2016-2037-1-byte-out-of-bounds-write.patch \
file://0001-Fix-segfault-with-append.patch \
+ file://CVE-2019-14866.patch \
"
SRC_URI[md5sum] = "fc207561a86b63862eea4b8300313e86"
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2019-10216.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2019-10216.patch
new file mode 100644
index 0000000000..9bec7343f5
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2019-10216.patch
@@ -0,0 +1,53 @@
+From 5b85ddd19a8420a1bd2d5529325be35d78e94234 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Fri, 2 Aug 2019 15:18:26 +0100
+Subject: [PATCH] Bug 701394: protect use of .forceput with executeonly
+
+Upstream-Status: Backport [http://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=5b85ddd19]
+CVE: CVE-2019-10216
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ Resource/Init/gs_type1.ps | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/Resource/Init/gs_type1.ps b/Resource/Init/gs_type1.ps
+index 6c7735bc0..a039ccee3 100644
+--- a/Resource/Init/gs_type1.ps
++++ b/Resource/Init/gs_type1.ps
+@@ -118,25 +118,25 @@
+ ( to be the same as glyph: ) print 1 index //== exec } if
+ 3 index exch 3 index .forceput
+ % scratch(string) RAGL(dict) AGL(dict) CharStrings(dict) cstring gname
+- }
++ }executeonly
+ {pop} ifelse
+- } forall
++ } executeonly forall
+ pop pop
+- }
++ } executeonly
+ {
+ pop pop pop
+ } ifelse
+- }
++ } executeonly
+ {
+ % scratch(string) RAGL(dict) AGL(dict) CharStrings(dict) cstring gname
+ pop pop
+ } ifelse
+- } forall
++ } executeonly forall
+ 3 1 roll pop pop
+- } if
++ } executeonly if
+ pop
+ dup /.AGLprocessed~GS //true .forceput
+- } if
++ } executeonly if
+
+ %% We need to excute the C .buildfont1 in a stopped context so that, if there
+ %% are errors we can put the stack back sanely and exit. Otherwise callers won't
+--
+2.17.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript_9.27.bb b/meta/recipes-extended/ghostscript/ghostscript_9.27.bb
index 32f938f254..bbd17104e1 100644
--- a/meta/recipes-extended/ghostscript/ghostscript_9.27.bb
+++ b/meta/recipes-extended/ghostscript/ghostscript_9.27.bb
@@ -29,6 +29,7 @@ SRC_URI_BASE = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/d
file://CVE-2019-14817-0001.patch \
file://CVE-2019-14817-0002.patch \
file://CVE-2019-14869-0001.patch \
+ file://CVE-2019-10216.patch \
"
SRC_URI = "${SRC_URI_BASE} \
diff --git a/meta/recipes-extended/iputils/iputils_s20190709.bb b/meta/recipes-extended/iputils/iputils_s20190709.bb
index 3f9e9917f0..42260f531e 100644
--- a/meta/recipes-extended/iputils/iputils_s20190709.bb
+++ b/meta/recipes-extended/iputils/iputils_s20190709.bb
@@ -32,7 +32,8 @@ PACKAGECONFIG[docs] = "-DBUILD_HTML_MANS=true -DBUILD_MANS=true,-DBUILD_HTML_MAN
inherit meson update-alternatives
-EXTRA_OEMESON += "--prefix=${root_prefix}/"
+# Have to disable setcap/suid as it's not deterministic
+EXTRA_OEMESON += "--prefix=${root_prefix}/ -DNO_SETCAP_OR_SUID=true"
ALTERNATIVE_PRIORITY = "100"
diff --git a/meta/recipes-extended/libarchive/libarchive/0001-RAR5-reader-reject-files-that-declare-invalid-header.patch b/meta/recipes-extended/libarchive/libarchive/0001-RAR5-reader-reject-files-that-declare-invalid-header.patch
new file mode 100644
index 0000000000..a84c1f1f76
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/0001-RAR5-reader-reject-files-that-declare-invalid-header.patch
@@ -0,0 +1,124 @@
+From c1fe0a8cc8dde8ba3eae3d17e34060d2d6e4eb96 Mon Sep 17 00:00:00 2001
+From: Grzegorz Antoniak <ga@anadoxin.org>
+Date: Sun, 2 Feb 2020 08:04:41 +0100
+Subject: [PATCH] RAR5 reader: reject files that declare invalid header flags
+
+One of the fields in RAR5's base block structure is the size of the
+header. Some invalid files declare a 0 header size setting, which can
+confuse the unpacker. Minimum header size for RAR5 base blocks is 7
+bytes (4 bytes for CRC, and 3 bytes for the rest), so block size of 0
+bytes should be rejected at header parsing stage.
+
+The fix adds an error condition if header size of 0 bytes is detected.
+In this case, the unpacker will not attempt to unpack the file, as the
+header is corrupted.
+
+The commit also adds OSSFuzz #20459 sample to test further regressions
+in this area.
+
+Upstream-Status: Backport[https://github.com/libarchive/libarchive/commit/94821008d6eea81e315c5881cdf739202961040a]
+CVE: CVE-2020-9308
+
+Signed-off-by: Wenlin Kang <wenlin.kang@windriver.com>
+---
+ Makefile.am | 1 +
+ libarchive/archive_read_support_format_rar5.c | 17 +++++++++++++++--
+ libarchive/test/test_read_format_rar5.c | 15 +++++++++++++++
+ ...d_format_rar5_block_size_is_too_small.rar.uu | 8 ++++++++
+ 4 files changed, 39 insertions(+), 2 deletions(-)
+ create mode 100644 libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu
+
+diff --git a/Makefile.am b/Makefile.am
+index da78b24..01abf20 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -863,6 +863,7 @@ libarchive_test_EXTRA_DIST=\
+ libarchive/test/test_read_format_rar5_symlink.rar.uu \
+ libarchive/test/test_read_format_rar5_truncated_huff.rar.uu \
+ libarchive/test/test_read_format_rar5_win32.rar.uu \
++ libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu \
+ libarchive/test/test_read_format_raw.bufr.uu \
+ libarchive/test/test_read_format_raw.data.gz.uu \
+ libarchive/test/test_read_format_raw.data.Z.uu \
+diff --git a/libarchive/archive_read_support_format_rar5.c b/libarchive/archive_read_support_format_rar5.c
+index 7c24627..f73393c 100644
+--- a/libarchive/archive_read_support_format_rar5.c
++++ b/libarchive/archive_read_support_format_rar5.c
+@@ -2034,6 +2034,8 @@ static int scan_for_signature(struct archive_read* a);
+ static int process_base_block(struct archive_read* a,
+ struct archive_entry* entry)
+ {
++ const size_t SMALLEST_RAR5_BLOCK_SIZE = 3;
++
+ struct rar5* rar = get_context(a);
+ uint32_t hdr_crc, computed_crc;
+ size_t raw_hdr_size = 0, hdr_size_len, hdr_size;
+@@ -2057,15 +2059,26 @@ static int process_base_block(struct archive_read* a,
+ return ARCHIVE_EOF;
+ }
+
++ hdr_size = raw_hdr_size + hdr_size_len;
++
+ /* Sanity check, maximum header size for RAR5 is 2MB. */
+- if(raw_hdr_size > (2 * 1024 * 1024)) {
++ if(hdr_size > (2 * 1024 * 1024)) {
+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+ "Base block header is too large");
+
+ return ARCHIVE_FATAL;
+ }
+
+- hdr_size = raw_hdr_size + hdr_size_len;
++ /* Additional sanity checks to weed out invalid files. */
++ if(raw_hdr_size == 0 || hdr_size_len == 0 ||
++ hdr_size < SMALLEST_RAR5_BLOCK_SIZE)
++ {
++ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
++ "Too small block encountered (%ld bytes)",
++ raw_hdr_size);
++
++ return ARCHIVE_FATAL;
++ }
+
+ /* Read the whole header data into memory, maximum memory use here is
+ * 2MB. */
+diff --git a/libarchive/test/test_read_format_rar5.c b/libarchive/test/test_read_format_rar5.c
+index 1408f37..32e7ed8 100644
+--- a/libarchive/test/test_read_format_rar5.c
++++ b/libarchive/test/test_read_format_rar5.c
+@@ -1194,3 +1194,18 @@ DEFINE_TEST(test_read_format_rar5_fileattr)
+
+ EPILOGUE();
+ }
++
++DEFINE_TEST(test_read_format_rar5_block_size_is_too_small)
++{
++ char buf[4096];
++ PROLOGUE("test_read_format_rar5_block_size_is_too_small.rar");
++
++ /* This file is damaged, so those functions should return failure.
++ * Additionally, SIGSEGV shouldn't be raised during execution
++ * of those functions. */
++
++ assertA(archive_read_next_header(a, &ae) != ARCHIVE_OK);
++ assertA(archive_read_data(a, buf, sizeof(buf)) <= 0);
++
++ EPILOGUE();
++}
+diff --git a/libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu b/libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu
+new file mode 100644
+index 0000000..5cad219
+--- /dev/null
++++ b/libarchive/test/test_read_format_rar5_block_size_is_too_small.rar.uu
+@@ -0,0 +1,8 @@
++begin 644 test_read_format_rar5_block_size_is_too_small.rar
++M4F%R(1H'`0"-[P+2``+'(!P,("`@N`,!`B`@("`@("`@("`@("`@("#_("`@
++M("`@("`@("`@((:Q;2!4-'-^4B`!((WO`M(``O\@$/\@-R`@("`@("`@("`@
++M``X@("`@("`@____("`@("`@(/\@("`@("`@("`@("#_(+6U,2"UM;6UM[CU
++M)B`@*(0G(`!.`#D\3R``(/__(,+_````-0#_($&%*/HE=C+N`"```"```"`D
++J`)$#("#_("#__P`@__\@_R#_("`@("`@("#_("#__R`@(/__("#__R`"
++`
++end
+--
+2.23.0
+
diff --git a/meta/recipes-extended/libarchive/libarchive_3.4.0.bb b/meta/recipes-extended/libarchive/libarchive_3.4.0.bb
index c196382b07..db45ccf654 100644
--- a/meta/recipes-extended/libarchive/libarchive_3.4.0.bb
+++ b/meta/recipes-extended/libarchive/libarchive_3.4.0.bb
@@ -33,6 +33,7 @@ EXTRA_OECONF += "--enable-largefile"
SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \
file://CVE-2019-19221.patch \
+ file://0001-RAR5-reader-reject-files-that-declare-invalid-header.patch \
"
SRC_URI[md5sum] = "6046396255bd7cf6d0f6603a9bda39ac"
diff --git a/meta/recipes-extended/libidn/libidn2_2.2.0.bb b/meta/recipes-extended/libidn/libidn2_2.2.0.bb
index bcbfdd85b9..71314149e1 100644
--- a/meta/recipes-extended/libidn/libidn2_2.2.0.bb
+++ b/meta/recipes-extended/libidn/libidn2_2.2.0.bb
@@ -22,7 +22,8 @@ EXTRA_OECONF += "--disable-rpath \
"
do_install_append() {
- sed -i -e 's|-L${STAGING_LIBDIR}||' ${D}${libdir}/pkgconfig/libidn2.pc
+ # Need to remove any duplicate whitespace too for reproducibility
+ sed -i -e 's|-L${STAGING_LIBDIR}||' -e 's/ */ /g' ${D}${libdir}/pkgconfig/libidn2.pc
}
LICENSE_${PN} = "(GPLv2+ | LGPLv3)"
diff --git a/meta/recipes-extended/man-db/man-db_2.8.7.bb b/meta/recipes-extended/man-db/man-db_2.8.7.bb
index 083b2374aa..0d73b03482 100644
--- a/meta/recipes-extended/man-db/man-db_2.8.7.bb
+++ b/meta/recipes-extended/man-db/man-db_2.8.7.bb
@@ -10,7 +10,7 @@ SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/man-db/man-db-${PV}.tar.xz \
SRC_URI[md5sum] = "ec0b23c8314a1654c4d059b2c18ce43d"
SRC_URI[sha256sum] = "b9cd5bb996305d08bfe9e1114edc30b4c97be807093b88af8033ed1cf9beb326"
-DEPENDS = "libpipeline gdbm groff-native base-passwd"
+DEPENDS = "libpipeline gdbm groff-native base-passwd flex-native"
RDEPENDS_${PN} += "base-passwd"
# | /usr/src/debug/man-db/2.8.0-r0/man-db-2.8.0/src/whatis.c:939: undefined reference to `_nl_msg_cat_cntr'
diff --git a/meta/recipes-extended/mc/files/0001-Add-option-to-control-configure-args.patch b/meta/recipes-extended/mc/files/0001-Add-option-to-control-configure-args.patch
new file mode 100644
index 0000000000..e76aac8161
--- /dev/null
+++ b/meta/recipes-extended/mc/files/0001-Add-option-to-control-configure-args.patch
@@ -0,0 +1,99 @@
+From a54501d3c9541bc8600225aa2d42531f93c6def7 Mon Sep 17 00:00:00 2001
+From: Joshua Watt <JPEWhacker@gmail.com>
+Date: Sat, 9 Nov 2019 20:01:48 -0600
+Subject: [PATCH] Add option to control configure args
+
+Embedding the configure time options into the executable can lead to
+non-reproducible builds, since configure options often have embedded
+paths. Add a configure time option to control if the configure args are
+embedded so this can be disabled.
+
+Upstream-Status: Submitted [https://midnight-commander.org/ticket/4031]
+Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
+---
+ configure.ac | 6 ++++++
+ src/args.c | 6 ++++++
+ src/textconf.c | 2 ++
+ 3 files changed, 14 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index 19d1a76be..a1948f6b9 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -544,6 +544,12 @@ dnl Clarify do we really need GModule
+ AM_CONDITIONAL([HAVE_GMODULE], [test -n "$g_module_supported" && \
+ test x"$textmode_x11_support" = x"yes" -o x"$enable_aspell" = x"yes"])
+
++AC_ARG_ENABLE([configure-args],
++  AS_HELP_STRING([--enable-configure-args], [Enable embedding the configure arguments for display via --configure-options]))
++if test "x$enable_configure_args" != xno; then
++ AC_DEFINE([ENABLE_CONFIGURE_ARGS], 1, [Define to enable showing configure arguments in help])
++fi
++
+ AC_DEFINE_UNQUOTED([MC_CONFIGURE_ARGS], ["$ac_configure_args"], [MC configure arguments])
+
+ AC_CONFIG_FILES(
+diff --git a/src/args.c b/src/args.c
+index baef1a1c8..f8dc24020 100644
+--- a/src/args.c
++++ b/src/args.c
+@@ -95,7 +95,9 @@ static gboolean mc_args__nouse_subshell = FALSE;
+ #endif /* ENABLE_SUBSHELL */
+ static gboolean mc_args__show_datadirs = FALSE;
+ static gboolean mc_args__show_datadirs_extended = FALSE;
++#ifdef ENABLE_CONFIGURE_ARGS
+ static gboolean mc_args__show_configure_opts = FALSE;
++#endif
+
+ static GOptionGroup *main_group;
+
+@@ -125,6 +127,7 @@ static const GOptionEntry argument_main_table[] = {
+ NULL
+ },
+
++#ifdef ENABLE_CONFIGURE_ARGS
+ /* show configure options */
+ {
+ "configure-options", '\0', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_NONE,
+@@ -132,6 +135,7 @@ static const GOptionEntry argument_main_table[] = {
+ N_("Print configure options"),
+ NULL
+ },
++#endif
+
+ {
+ "printwd", 'P', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_STRING,
+@@ -758,11 +762,13 @@ mc_args_show_info (void)
+ return FALSE;
+ }
+
++#ifdef ENABLE_CONFIGURE_ARGS
+ if (mc_args__show_configure_opts)
+ {
+ show_configure_options ();
+ return FALSE;
+ }
++#endif
+
+ return TRUE;
+ }
+diff --git a/src/textconf.c b/src/textconf.c
+index 1e0613e58..f39b9e028 100644
+--- a/src/textconf.c
++++ b/src/textconf.c
+@@ -232,10 +232,12 @@ show_datadirs_extended (void)
+
+ /* --------------------------------------------------------------------------------------------- */
+
++#ifdef ENABLE_CONFIGURE_ARGS
+ void
+ show_configure_options (void)
+ {
+ (void) printf ("%s\n", MC_CONFIGURE_ARGS);
+ }
++#endif
+
+ /* --------------------------------------------------------------------------------------------- */
+--
+2.23.0
+
diff --git a/meta/recipes-extended/mc/files/nomandate.patch b/meta/recipes-extended/mc/files/nomandate.patch
new file mode 100644
index 0000000000..48bd73b110
--- /dev/null
+++ b/meta/recipes-extended/mc/files/nomandate.patch
@@ -0,0 +1,21 @@
+The man page date can vary depending upon the host perl, e.g. in Russian
+some versions print 'июня', others 'Июнь' or Polish 'czerwca' or 'czerwiec'.
+Rather than depend upon perl-native to fix this, just remove the date from
+the manpages.
+
+RP 2020/2/4
+
+Upstream-Status: Inappropriate [OE specific reproducibility workaround]
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: mc-4.8.23/doc/man/date-of-man-include.am
+===================================================================
+--- mc-4.8.23.orig/doc/man/date-of-man-include.am
++++ mc-4.8.23/doc/man/date-of-man-include.am
+@@ -1,5 +1,5 @@
+ SED_PARAMETERS = \
+- -e "s/%DATE_OF_MAN_PAGE%/$${MAN_DATE}/g" \
++ -e "s/%DATE_OF_MAN_PAGE%//g" \
+ -e "s/%DISTR_VERSION%/@DISTR_VERSION@/g" \
+ -e "s{%prefix%{@prefix@{g" \
+ -e "s{%sysconfdir%{@sysconfdir@{g" \
diff --git a/meta/recipes-extended/mc/mc_4.8.23.bb b/meta/recipes-extended/mc/mc_4.8.23.bb
index 83de8dbb2c..de76591d9b 100644
--- a/meta/recipes-extended/mc/mc_4.8.23.bb
+++ b/meta/recipes-extended/mc/mc_4.8.23.bb
@@ -8,6 +8,8 @@ RDEPENDS_${PN} = "ncurses-terminfo"
SRC_URI = "http://www.midnight-commander.org/downloads/${BPN}-${PV}.tar.bz2 \
file://0001-mc-replace-perl-w-with-use-warnings.patch \
+ file://0001-Add-option-to-control-configure-args.patch \
+ file://nomandate.patch \
"
SRC_URI[md5sum] = "152927ac29cf0e61d7d019f261bb7d89"
SRC_URI[sha256sum] = "238c4552545dcf3065359bd50753abbb150c1b22ec5a36eaa02c82808293267d"
@@ -21,9 +23,12 @@ PACKAGECONFIG ??= ""
PACKAGECONFIG[smb] = "--enable-vfs-smb,--disable-vfs-smb,samba,"
PACKAGECONFIG[sftp] = "--enable-vfs-sftp,--disable-vfs-sftp,libssh2,"
-EXTRA_OECONF = "--with-screen=ncurses --without-gpm-mouse --without-x"
+EXTRA_OECONF = "--with-screen=ncurses --without-gpm-mouse --without-x --disable-configure-args"
CACHED_CONFIGUREVARS += "ac_cv_path_PERL='/usr/bin/env perl'"
+CACHED_CONFIGUREVARS += "ac_cv_path_PYTHON='/usr/bin/env python'"
+CACHED_CONFIGUREVARS += "ac_cv_path_GREP='/usr/bin/env grep'"
+CACHED_CONFIGUREVARS += "mc_cv_have_zipinfo=yes"
do_install_append () {
sed -i -e '1s,#!.*perl,#!${bindir}/env perl,' ${D}${libexecdir}/mc/extfs.d/*
diff --git a/meta/recipes-extended/pam/libpam/pam.d/common-password b/meta/recipes-extended/pam/libpam/pam.d/common-password
index 3896057328..52478dae77 100644
--- a/meta/recipes-extended/pam/libpam/pam.d/common-password
+++ b/meta/recipes-extended/pam/libpam/pam.d/common-password
@@ -10,13 +10,10 @@
# The "sha512" option enables salted SHA512 passwords. Without this option,
# the default is Unix crypt. Prior releases used the option "md5".
#
-# The "obscure" option replaces the old `OBSCURE_CHECKS_ENAB' option in
-# login.defs.
-#
# See the pam_unix manpage for other options.
# here are the per-package modules (the "Primary" block)
-password [success=1 default=ignore] pam_unix.so obscure sha512
+password [success=1 default=ignore] pam_unix.so sha512
# here's the fallback if no module succeeds
password requisite pam_deny.so
# prime the stack with a positive return value if there isn't one already;
diff --git a/meta/recipes-extended/procps/procps/0001-top-avoid-a-potential-SEGV-during-program-terminatio.patch b/meta/recipes-extended/procps/procps/0001-top-avoid-a-potential-SEGV-during-program-terminatio.patch
new file mode 100644
index 0000000000..4f7a01e41b
--- /dev/null
+++ b/meta/recipes-extended/procps/procps/0001-top-avoid-a-potential-SEGV-during-program-terminatio.patch
@@ -0,0 +1,61 @@
+From d37f85c269fbb6e905802ffdbce0ba4173ba21a9 Mon Sep 17 00:00:00 2001
+From: Jim Warner <james.warner@comcast.net>
+Date: Tue, 6 Aug 2019 00:00:00 -0500
+Subject: [PATCH] top: avoid a potential SEGV during program termination
+
+The backtrace shown in the bug report referenced below
+illustrates a 'normal' program termination interrupted
+with some signal, ultimately then causing a top crash.
+
+So this commit just rearranges a little code such that
+all signals will be blocked during that rather lengthy
+end of program processing regardless of how initiated.
+
+[ in that report, ignore the assertion regarding the ]
+[ '-n' option. it obviously was not '1' since do_key ]
+[ had been called, which otherwise wouldn't be true. ]
+
+[ and when it is '1' the -d option would be ignored. ]
+
+Reference(s):
+https://bugzilla.redhat.com/show_bug.cgi?id=1737552
+
+Signed-off-by: Jim Warner <james.warner@comcast.net>
+Upstream-Status: Backport [https://gitlab.com/procps-ng/procps.git]
+Signed-off-by: Shaohua Zhan <shaohua.zhan@windriver.com>
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ top/top.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/top/top.c b/top/top.c
+index b01907a..73598e2 100644
+--- a/top/top.c
++++ b/top/top.c
+@@ -404,6 +404,11 @@ static void at_eoj (void) {
+ * The real program end */
+ static void bye_bye (const char *str) NORETURN;
+ static void bye_bye (const char *str) {
++ sigset_t ss;
++
++// POSIX.1-2004 async-signal-safe: sigfillset, sigprocmask
++ sigfillset(&ss);
++ sigprocmask(SIG_BLOCK, &ss, NULL);
+ at_eoj(); // restore tty in preparation for exit
+ #ifdef ATEOJ_RPTSTD
+ { proc_t *p;
+@@ -595,12 +600,6 @@ static void sig_abexit (int sig) {
+ * SIGUSR1 and SIGUSR2 */
+ static void sig_endpgm (int dont_care_sig) NORETURN;
+ static void sig_endpgm (int dont_care_sig) {
+- sigset_t ss;
+-
+-// POSIX.1-2004 async-signal-safe: sigfillset, sigprocmask
+- sigfillset(&ss);
+- sigprocmask(SIG_BLOCK, &ss, NULL);
+- Frames_signal = BREAK_sig;
+ bye_bye(NULL);
+ (void)dont_care_sig;
+ } // end: sig_endpgm
+--
+GitLab
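The procps change above relocates the signal blocking from sig_endpgm() into bye_bye(), so the lengthy end-of-job processing is shielded from late signals no matter how termination starts. A minimal editorial sketch of that technique (not part of the patch; the function name is hypothetical):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* Block every signal before lengthy termination work so a signal arriving
 * mid-cleanup cannot re-enter the exit path -- the pattern the patch moves
 * into bye_bye(). */
static void terminate_cleanly(void)
{
    sigset_t ss;

    sigfillset(&ss);                    /* all signals */
    sigprocmask(SIG_BLOCK, &ss, NULL);  /* both calls are async-signal-safe */

    /* ... restore the tty, emit end-of-job reports, free resources ... */
    puts("cleanup finished");
    exit(EXIT_SUCCESS);
}

int main(void)
{
    terminate_cleanly();
    return 0;   /* not reached */
}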
diff --git a/meta/recipes-extended/procps/procps/0001-top-restore-one-line-of-code-to-sig_endpgm-function.patch b/meta/recipes-extended/procps/procps/0001-top-restore-one-line-of-code-to-sig_endpgm-function.patch
new file mode 100644
index 0000000000..9fe11b898d
--- /dev/null
+++ b/meta/recipes-extended/procps/procps/0001-top-restore-one-line-of-code-to-sig_endpgm-function.patch
@@ -0,0 +1,38 @@
+From ed34b1228ed08fbfdbf6f1a61ca7ca62448ccd86 Mon Sep 17 00:00:00 2001
+From: Jim Warner <james.warner@comcast.net>
+Date: Wed, 22 Jan 2020 00:00:00 -0600
+Subject: [PATCH] top: restore one line of code to sig_endpgm() function
+
+When that potential abend at program end was addressed
+in the patch shown below, one line of code was removed
+in error. That line served to suppress some end-of-job
+reports should ATEOJ_RPTSTD or ATEOJ_RPTHSH be active.
+
+So, this patch restores that previously deleted logic.
+
+Reference(s):
+. potential SEGV fix, master branch
+commit d37f85c269fbb6e905802ffdbce0ba4173ba21a9
+
+Signed-off-by: Jim Warner <james.warner@comcast.net>
+Upstream-Status: Backport [https://gitlab.com/procps-ng/procps.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ top/top.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/top/top.c b/top/top.c
+index 8e8c7d9..63ec5fe 100644
+--- a/top/top.c
++++ b/top/top.c
+@@ -604,6 +604,7 @@ static void sig_abexit (int sig) {
+ * SIGUSR1 and SIGUSR2 */
+ static void sig_endpgm (int dont_care_sig) NORETURN;
+ static void sig_endpgm (int dont_care_sig) {
++ Frames_signal = BREAK_sig;
+ bye_bye(NULL);
+ (void)dont_care_sig;
+ } // end: sig_endpgm
+--
+2.17.0
+
diff --git a/meta/recipes-extended/procps/procps_3.3.15.bb b/meta/recipes-extended/procps/procps_3.3.15.bb
index f240e54fd8..bf7ee63775 100644
--- a/meta/recipes-extended/procps/procps_3.3.15.bb
+++ b/meta/recipes-extended/procps/procps_3.3.15.bb
@@ -15,6 +15,8 @@ inherit autotools gettext pkgconfig update-alternatives
SRC_URI = "http://downloads.sourceforge.net/project/procps-ng/Production/procps-ng-${PV}.tar.xz \
file://sysctl.conf \
file://0001-Fix-out-of-tree-builds.patch \
+ file://0001-top-avoid-a-potential-SEGV-during-program-terminatio.patch \
+ file://0001-top-restore-one-line-of-code-to-sig_endpgm-function.patch \
"
SRC_URI[md5sum] = "2b0717a7cb474b3d6dfdeedfbad2eccc"
diff --git a/meta/recipes-extended/psmisc/psmisc.inc b/meta/recipes-extended/psmisc/psmisc.inc
index 594a10cf22..6de5acb71b 100644
--- a/meta/recipes-extended/psmisc/psmisc.inc
+++ b/meta/recipes-extended/psmisc/psmisc.inc
@@ -7,7 +7,7 @@ command sends a specified signal (SIGTERM if nothing is specified) to \
processes identified by name. The fuser command identifies the PIDs \
of processes that are using specified files or filesystems."
SECTION = "base"
-DEPENDS = "ncurses virtual/libintl gettext-native"
+DEPENDS = "ncurses virtual/libintl gettext-native xz-native"
LICENSE = "GPLv2"
SRC_URI = "${SOURCEFORGE_MIRROR}/psmisc/psmisc-${PV}.tar.gz"
diff --git a/meta/recipes-extended/screen/screen/CVE-2020-9366.patch b/meta/recipes-extended/screen/screen/CVE-2020-9366.patch
new file mode 100644
index 0000000000..a52b9e6e68
--- /dev/null
+++ b/meta/recipes-extended/screen/screen/CVE-2020-9366.patch
@@ -0,0 +1,48 @@
+From 8ce90c1d3d5bece150479d8bc9303fd9d9f45e03 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Amadeusz=20S=C5=82awi=C5=84ski?= <amade@asmblr.net>
+Date: Thu, 30 Jan 2020 17:56:27 +0100
+Subject: [PATCH] Fix out of bounds access when setting w_xtermosc after OSC 49
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Amadeusz=20S=C5=82awi=C5=84ski?= <amade@asmblr.net>
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+echo -e "\e]49\e; \n\ec"
+crashes screen.
+
+This happens because 49 is divided by 10 and used as table index
+resulting in access to w_xtermosc[4], which is out of bounds with table
+itself being size 4. Increase size of table by 1 to 5, which is enough
+for all current uses.
+
+As this overwrites memory based on user input, it is a potential security
+issue.
+
+Reported-by: pippin@gimp.org
+Signed-off-by: Amadeusz Sławiński <amade@asmblr.net>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/screen.git/commit/?h=v.4.8.0&id=68386dfb1fa33471372a8cd2e74686758a2f527b]
+CVE: CVE-2020-9366
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+
+---
+ window.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/window.h b/window.h
+index bd10dcd..a8afa19 100644
+--- a/window.h
++++ b/window.h
+@@ -237,7 +237,7 @@ struct win
+ char w_vbwait;
+ char w_norefresh; /* dont redisplay when switching to that win */
+ #ifdef RXVT_OSC
+- char w_xtermosc[4][MAXSTR]; /* special xterm/rxvt escapes */
++ char w_xtermosc[5][MAXSTR]; /* special xterm/rxvt escapes */
+ #endif
+ int w_mouse; /* mouse mode 0,9,1000 */
+ #ifdef HAVE_BRAILLE
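To make the out-of-bounds index in the screen fix above concrete: the OSC number is divided by 10 to pick a slot, so OSC 49 selects slot 4, one past the end of the old 4-entry array. A small editorial sketch (MAXSTR is a stand-in value, not taken from screen's headers):

#include <stdio.h>

#define MAXSTR 256                       /* stand-in; screen defines its own */

int main(void)
{
    char w_xtermosc[4][MAXSTR];          /* pre-patch size: valid slots 0..3 */
    int osc = 49;                        /* escape number from user input    */
    int idx = osc / 10;                  /* 49 / 10 == 4 -> one past the end */

    printf("slot %d, highest valid slot %zu\n",
           idx, sizeof(w_xtermosc) / sizeof(w_xtermosc[0]) - 1);
    return 0;
}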
diff --git a/meta/recipes-extended/screen/screen_4.6.2.bb b/meta/recipes-extended/screen/screen_4.6.2.bb
index 21b476ddb0..d00b849021 100644
--- a/meta/recipes-extended/screen/screen_4.6.2.bb
+++ b/meta/recipes-extended/screen/screen_4.6.2.bb
@@ -25,6 +25,7 @@ SRC_URI = "${GNU_MIRROR}/screen/screen-${PV}.tar.gz \
file://0001-fix-for-multijob-build.patch \
file://0001-configure.ac-fix-configure-failed-while-build-dir-ha.patch \
file://0001-Remove-more-compatibility-stuff.patch \
+ file://CVE-2020-9366.patch \
"
SRC_URI[md5sum] = "a0f529d3333b128dfaa324d978ba73a8"
diff --git a/meta/recipes-extended/sudo/sudo.inc b/meta/recipes-extended/sudo/sudo.inc
index 15075bcefd..4edfabe510 100644
--- a/meta/recipes-extended/sudo/sudo.inc
+++ b/meta/recipes-extended/sudo/sudo.inc
@@ -26,7 +26,7 @@ PACKAGECONFIG[pam-wheel] = ",,,pam-plugin-wheel"
CONFFILES_${PN} = "${sysconfdir}/sudoers"
-EXTRA_OECONF = "--with-editor=/bin/vi --with-env-editor"
+EXTRA_OECONF = "--with-editor=${base_bindir}/vi --with-env-editor"
EXTRA_OECONF_append_libc-musl = " --disable-hardening "
diff --git a/meta/recipes-extended/sudo/sudo_1.8.27.bb b/meta/recipes-extended/sudo/sudo_1.8.27.bb
index 0a11a1b28f..6d470d0373 100644
--- a/meta/recipes-extended/sudo/sudo_1.8.27.bb
+++ b/meta/recipes-extended/sudo/sudo_1.8.27.bb
@@ -15,10 +15,18 @@ SRC_URI[sha256sum] = "7beb68b94471ef56d8a1036dbcdc09a7b58a949a68ffce48b83f837dd3
DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}"
+CACHED_CONFIGUREVARS = " \
+ ac_cv_type_rsize_t=no \
+ ac_cv_path_MVPROG=${base_bindir}/mv \
+ ac_cv_path_BSHELLPROG=${base_bindir}/sh \
+ ac_cv_path_SENDMAILPROG=${sbindir}/sendmail \
+ ac_cv_path_VIPROG=${base_bindir}/vi \
+ "
+
EXTRA_OECONF += " \
- ac_cv_type_rsize_t=no \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '--with-pam', '--without-pam', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '--enable-tmpfiles.d=${nonarch_libdir}/tmpfiles.d', '--disable-tmpfiles.d', d)} \
+ --with-vardir=/var/lib/sudo \
"
do_install_append () {
diff --git a/meta/recipes-extended/tar/tar_1.32.bb b/meta/recipes-extended/tar/tar_1.32.bb
index 18f09b5711..ebe6cb0dbd 100644
--- a/meta/recipes-extended/tar/tar_1.32.bb
+++ b/meta/recipes-extended/tar/tar_1.32.bb
@@ -22,6 +22,8 @@ PACKAGECONFIG[acl] = "--with-posix-acls,--without-posix-acls,acl"
EXTRA_OECONF += "DEFAULT_RMT_DIR=${sbindir}"
+CACHED_CONFIGUREVARS += "tar_cv_path_RSH=no"
+
# Let aclocal use the relative path for the m4 file rather than the
# absolute since tar has a lot of m4 files, otherwise there might
# be an "Argument list too long" error when it is built in a long/deep
diff --git a/meta/recipes-extended/timezone/timezone.inc b/meta/recipes-extended/timezone/timezone.inc
index f6bab1acb4..e542290c3c 100644
--- a/meta/recipes-extended/timezone/timezone.inc
+++ b/meta/recipes-extended/timezone/timezone.inc
@@ -4,7 +4,7 @@ SECTION = "base"
LICENSE = "PD & BSD & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c679c9d6b02bc2757b3eaf8f53c43fba"
-PV = "2019c"
+PV = "2020a"
SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode \
http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata \
@@ -12,7 +12,7 @@ SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz
UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
-SRC_URI[tzcode.md5sum] = "195a17454c5db05cab96595380650391"
-SRC_URI[tzcode.sha256sum] = "f6ebd3668e02d5ed223d3b7b1947561bf2d2da2f4bd1db61efefd9e06c167ed4"
-SRC_URI[tzdata.md5sum] = "f6987e6dfdb2eb83a1b5076a50b80894"
-SRC_URI[tzdata.sha256sum] = "79c7806dab09072308da0e3d22c37d3b245015a591891ea147d3b133b60ffc7c"
+SRC_URI[tzcode.md5sum] = "f87c3477e85a5c4b00df0def6c6a0055"
+SRC_URI[tzcode.sha256sum] = "7d2af7120ee03df71fbca24031ccaf42404752e639196fe93c79a41b38a6d669"
+SRC_URI[tzdata.md5sum] = "96a985bb8eeab535fb8aa2132296763a"
+SRC_URI[tzdata.sha256sum] = "547161eca24d344e0b5f96aff6a76b454da295dc14ed4ca50c2355043fb899a2"
diff --git a/meta/recipes-gnome/gcr/gcr_3.28.1.bb b/meta/recipes-gnome/gcr/gcr_3.28.1.bb
index 2299199c31..64b0569f04 100644
--- a/meta/recipes-gnome/gcr/gcr_3.28.1.bb
+++ b/meta/recipes-gnome/gcr/gcr_3.28.1.bb
@@ -5,7 +5,7 @@ BUGTRACKER = "https://bugzilla.gnome.org/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=55ca817ccb7d5b5b66355690e9abc605"
-DEPENDS = "intltool-native gtk+3 p11-kit glib-2.0 libgcrypt \
+DEPENDS = "intltool-native gtk+3 p11-kit glib-2.0 libgcrypt gnupg-native \
${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'libxslt-native', '', d)}"
inherit gnomebase gtk-icon-cache gtk-doc distro_features_check upstream-version-is-even vala gobject-introspection
diff --git a/meta/recipes-gnome/gtk+/gtk+3/sort-resources.patch b/meta/recipes-gnome/gtk+/gtk+3/sort-resources.patch
new file mode 100644
index 0000000000..7f87372c52
--- /dev/null
+++ b/meta/recipes-gnome/gtk+/gtk+3/sort-resources.patch
@@ -0,0 +1,19 @@
+If the list of resource files isn't sorted in some way then libgdk.so will differ
+depending on the inode order of the resource files.
+
+Upstream-Status: Pending
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+diff --git a/gdk/Makefile.am b/gdk/Makefile.am
+index e25b57ba50..26f2d57c6e 100644
+--- a/gdk/Makefile.am
++++ b/gdk/Makefile.am
+@@ -465,7 +465,7 @@ stamp-gc-h: $(top_builddir)/config.status
+ # Resources
+ #
+
+-glsl_sources := $(wildcard $(srcdir)/resources/glsl/*.glsl)
++glsl_sources := $(sort $(wildcard $(srcdir)/resources/glsl/*.glsl))
+
+ gdk.gresource.xml: Makefile.am
+ $(AM_V_GEN) echo "<?xml version='1.0' encoding='UTF-8'?>" > $@; \
diff --git a/meta/recipes-gnome/gtk+/gtk+3_3.24.8.bb b/meta/recipes-gnome/gtk+/gtk+3_3.24.8.bb
index d79b18bee0..596dee6264 100644
--- a/meta/recipes-gnome/gtk+/gtk+3_3.24.8.bb
+++ b/meta/recipes-gnome/gtk+/gtk+3_3.24.8.bb
@@ -7,6 +7,7 @@ SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar
file://0002-Do-not-try-to-initialize-GL-without-libGL.patch \
file://0003-Add-disable-opengl-configure-option.patch \
file://link_fribidi.patch \
+ file://sort-resources.patch \
"
SRC_URI[md5sum] = "eeedde01856238114dcf4df3ebc942a5"
SRC_URI[sha256sum] = "666962de9b9768fe9ca785b0e2f42c8b9db3868a12fa9b356b167238d70ac799"
diff --git a/meta/recipes-graphics/jpeg/files/0001-rdppm.c-Fix-buf-overrun-caused-by-bad-binary-PPM.patch b/meta/recipes-graphics/jpeg/files/0001-rdppm.c-Fix-buf-overrun-caused-by-bad-binary-PPM.patch
new file mode 100644
index 0000000000..03b6dba153
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/0001-rdppm.c-Fix-buf-overrun-caused-by-bad-binary-PPM.patch
@@ -0,0 +1,81 @@
+From ade1818b7542ef9e11ece5ce98df91fab45d674c Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 2 Jun 2020 14:15:37 -0500
+Subject: [PATCH] rdppm.c: Fix buf overrun caused by bad binary PPM
+
+This extends the fix in 1e81b0c3ea26f4ea8f56de05367469333de64a9f to
+include binary PPM files with maximum values < 255, thus preventing a
+malformed binary PPM input file with those specifications from
+triggering an overrun of the rescale array and potentially crashing
+cjpeg, TJBench, or any program that uses the tjLoadImage() function.
+
+Fixes #433
+
+CVE: CVE-2020-13790
+
+Signed-off-by: Liu Haitao <haitao.liu@windriver.com>
+---
+ ChangeLog.md | 20 ++++++++++++++++----
+ rdppm.c | 4 ++--
+ 2 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 3667d12..198c7b8 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -1,3 +1,15 @@
++2.0.4
++=====
++
++### Significant changes relative to 2.0.3:
++
++1. Fixed an issue in the PPM reader that caused a buffer overrun in cjpeg,
++TJBench, or the `tjLoadImage()` function if one of the values in a binary
++PPM/PGM input file exceeded the maximum value defined in the file's header and
++that maximum value was less than 255. libjpeg-turbo 1.5.0 already included a
++similar fix for binary PPM/PGM files with maximum values greater than 255.
++
++
+ 2.0.3
+ =====
+
+@@ -520,10 +532,10 @@ application was linked against.
+
+ 3. Fixed a couple of issues in the PPM reader that would cause buffer overruns
+ in cjpeg if one of the values in a binary PPM/PGM input file exceeded the
+-maximum value defined in the file's header. libjpeg-turbo 1.4.2 already
+-included a similar fix for ASCII PPM/PGM files. Note that these issues were
+-not security bugs, since they were confined to the cjpeg program and did not
+-affect any of the libjpeg-turbo libraries.
++maximum value defined in the file's header and that maximum value was greater
++than 255. libjpeg-turbo 1.4.2 already included a similar fix for ASCII PPM/PGM
++files. Note that these issues were not security bugs, since they were confined
++to the cjpeg program and did not affect any of the libjpeg-turbo libraries.
+
+ 4. Fixed an issue whereby attempting to decompress a JPEG file with a corrupt
+ header using the `tjDecompressToYUV2()` function would cause the function to
+diff --git a/rdppm.c b/rdppm.c
+index 87bc330..a8507b9 100644
+--- a/rdppm.c
++++ b/rdppm.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2009 by Bill Allombert, Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2015-2017, D. R. Commander.
++ * Copyright (C) 2015-2017, 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -720,7 +720,7 @@ start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ /* On 16-bit-int machines we have to be careful of maxval = 65535 */
+ source->rescale = (JSAMPLE *)
+ (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
+- (size_t)(((long)maxval + 1L) *
++ (size_t)(((long)MAX(maxval, 255) + 1L) *
+ sizeof(JSAMPLE)));
+ half_maxval = maxval / 2;
+ for (val = 0; val <= (long)maxval; val++) {
+--
+2.17.0
+
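The rdppm.c fix above sizes the rescale table for at least 256 entries, so a raw sample byte larger than a small declared maxval can no longer index past the table. A hedged sketch of that sizing rule, written outside libjpeg-turbo's own allocator API:

#include <stdlib.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Binary PPM samples are read as raw bytes (0..255), so the lookup table must
 * cover MAX(maxval, 255) + 1 entries even when the header declares maxval < 255. */
static unsigned char *alloc_rescale(long maxval)
{
    return calloc((size_t)(MAX(maxval, 255L) + 1L), sizeof(unsigned char));
}

int main(void)
{
    unsigned char *rescale = alloc_rescale(15);  /* malformed file: maxval 15 */
    if (rescale == NULL)
        return 1;
    rescale[255] = 0;                            /* stays in bounds after the fix */
    free(rescale);
    return 0;
}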
diff --git a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.3.bb b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.3.bb
index 1cf854de62..8ea81f386f 100644
--- a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.3.bb
+++ b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.3.bb
@@ -12,6 +12,7 @@ DEPENDS_append_x86_class-target = " nasm-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \
file://0001-libjpeg-turbo-fix-package_qa-error.patch \
+ file://0001-rdppm.c-Fix-buf-overrun-caused-by-bad-binary-PPM.patch \
"
SRC_URI[md5sum] = "bd07fddf26f9def7bab02739eb655116"
diff --git a/meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch b/meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch
index 3458c19199..346b217585 100644
--- a/meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch
+++ b/meta/recipes-graphics/mesa/files/0003-Allow-enable-DRI-without-DRI-drivers.patch
@@ -23,7 +23,7 @@ index 0e50bb26c0a..de065c290d6 100644
with_dri_swrast = dri_drivers.contains('swrast')
-with_dri = dri_drivers.length() != 0 and dri_drivers != ['']
-+with_dri = get_option('dri') or (_drivers.length() != 0 and _drivers != [''])
++with_dri = get_option('dri') or (dri_drivers.length() != 0 and dri_drivers != [''])
gallium_drivers = get_option('gallium-drivers')
if gallium_drivers.contains('auto')
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18390.patch b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18390.patch
new file mode 100644
index 0000000000..ad61c95be3
--- /dev/null
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18390.patch
@@ -0,0 +1,66 @@
+From 24f67de7a9088a873844a39be03cee6882260ac9 Mon Sep 17 00:00:00 2001
+From: Gert Wollny <gert.wollny@collabora.com>
+Date: Mon, 7 Oct 2019 10:59:56 +0200
+Subject: [PATCH] vrend: check info formats in blits
+
+Closes #141
+Closes #142
+
+v2 : drop colon in error description (Emil)
+
+Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
+Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
+
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/virgl/virglrenderer/commit/24f67de7a9088a873844a39be03cee6882260ac9]
+CVE: CVE-2019-18390
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ src/virgl_hw.h | 1 +
+ src/vrend_renderer.c | 11 +++++++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/src/virgl_hw.h b/src/virgl_hw.h
+index 145780bf..5ccf3073 100644
+--- a/src/virgl_hw.h
++++ b/src/virgl_hw.h
+@@ -426,6 +426,7 @@ enum virgl_ctx_errors {
+ VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER,
+ VIRGL_ERROR_CTX_GLES_HAVE_TES_BUT_MISS_TCS,
+ VIRGL_ERROR_GL_ANY_SAMPLES_PASSED,
++ VIRGL_ERROR_CTX_ILLEGAL_FORMAT,
+ };
+
+ #define VIRGL_RESOURCE_Y_0_TOP (1 << 0)
+diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
+index 14fefb38..aa6a89c1 100644
+--- a/src/vrend_renderer.c
++++ b/src/vrend_renderer.c
+@@ -758,6 +758,7 @@ static const char *vrend_ctx_error_strings[] = {
+ [VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER] = "Illegal command buffer",
+ [VIRGL_ERROR_CTX_GLES_HAVE_TES_BUT_MISS_TCS] = "On GLES context and shader program has tesselation evaluation shader but no tesselation control shader",
+ [VIRGL_ERROR_GL_ANY_SAMPLES_PASSED] = "Query for ANY_SAMPLES_PASSED not supported",
++ [VIRGL_ERROR_CTX_ILLEGAL_FORMAT] = "Illegal format ID",
+ };
+
+ static void __report_context_error(const char *fname, struct vrend_context *ctx,
+@@ -8492,6 +8493,16 @@ void vrend_renderer_blit(struct vrend_context *ctx,
+ if (ctx->in_error)
+ return;
+
++ if (!info->src.format || (enum virgl_formats)info->src.format >= VIRGL_FORMAT_MAX) {
++ report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, info->src.format);
++ return;
++ }
++
++ if (!info->dst.format || (enum virgl_formats)info->dst.format >= VIRGL_FORMAT_MAX) {
++ report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, info->dst.format);
++ return;
++ }
++
+ if (info->render_condition_enable == false)
+ vrend_pause_render_condition(ctx, true);
+
+--
+2.24.1
+
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18391.patch b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18391.patch
new file mode 100644
index 0000000000..cc641d8293
--- /dev/null
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2019-18391.patch
@@ -0,0 +1,51 @@
+From 2abeb1802e3c005b17a7123e382171b3fb665971 Mon Sep 17 00:00:00 2001
+From: Gert Wollny <gert.wollny@collabora.com>
+Date: Tue, 8 Oct 2019 17:27:01 +0200
+Subject: [PATCH] vrend: check that the transfer iov holds enough data for the
+ data upload
+
+Closes #140
+
+Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
+Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
+
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/virgl/virglrenderer/commit/2abeb1802e3c005b17a7123e382171b3fb665971]
+CVE: CVE-2019-18391
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ src/vrend_renderer.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
+index 694e1d0e..fe23846b 100644
+--- a/src/vrend_renderer.c
++++ b/src/vrend_renderer.c
+@@ -7005,15 +7005,22 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
+ invert = true;
+ }
+
++ send_size = util_format_get_nblocks(res->base.format, info->box->width,
++ info->box->height) * elsize;
++ if (res->target == GL_TEXTURE_3D ||
++ res->target == GL_TEXTURE_2D_ARRAY ||
++ res->target == GL_TEXTURE_CUBE_MAP_ARRAY)
++ send_size *= info->box->depth;
++
+ if (need_temp) {
+- send_size = util_format_get_nblocks(res->base.format, info->box->width,
+- info->box->height) * elsize * info->box->depth;
+ data = malloc(send_size);
+ if (!data)
+ return ENOMEM;
+ read_transfer_data(iov, num_iovs, data, res->base.format, info->offset,
+ stride, layer_stride, info->box, invert);
+ } else {
++ if (send_size > iov[0].iov_len - info->offset)
++ return EINVAL;
+ data = (char*)iov[0].iov_base + info->offset;
+ }
+
+--
+2.24.1
+
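The added check follows the usual pattern for host code consuming a guest-supplied iovec: compute the expected transfer size first, then refuse any read that would run past the supplied buffer. An editorial sketch of that bounds test (helper name is hypothetical, not virglrenderer API):

#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>

/* Return 0 if [offset, offset + send_size) lies inside the iovec, else EINVAL. */
static int check_transfer_bounds(const struct iovec *iov, size_t offset, size_t send_size)
{
    if (offset > iov->iov_len || send_size > iov->iov_len - offset)
        return EINVAL;
    return 0;
}

int main(void)
{
    char buf[64];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

    printf("fits: %d, overruns: %d\n",
           check_transfer_bounds(&iov, 16, 32),   /* 0      */
           check_transfer_bounds(&iov, 16, 128)); /* EINVAL */
    return 0;
}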
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2020-8002.patch b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2020-8002.patch
new file mode 100644
index 0000000000..925f2c8eb0
--- /dev/null
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2020-8002.patch
@@ -0,0 +1,39 @@
+From 63bcca251f093d83da7e290ab4bbd38ae69089b5 Mon Sep 17 00:00:00 2001
+From: Gert Wollny <gert.wollny@collabora.com>
+Date: Wed, 15 Jan 2020 13:43:58 +0100
+Subject: [PATCH] vrend: Don't try launching a grid if no CS is available
+
+Closes #155
+
+Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
+Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
+
+Upstream-Status: Backport
+[https://gitlab.freedesktop.org/virgl/virglrenderer/-/commit/63bcca251f093d83da7e290ab4bbd38ae69089b5.patch]
+CVE: CVE-2020-8002
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ src/vrend_renderer.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
+index a054bad8..2280fc43 100644
+--- a/src/vrend_renderer.c
++++ b/src/vrend_renderer.c
+@@ -4604,6 +4604,13 @@ void vrend_launch_grid(struct vrend_context *ctx,
+ }
+ ctx->sub->shader_dirty = true;
+ }
++
++ if (!ctx->sub->prog) {
++ vrend_printf("%s: Skipping compute shader execution due to missing shaders: %s\n",
++ __func__, ctx->debug_name);
++ return;
++ }
++
+ vrend_use_program(ctx, ctx->sub->prog->id);
+
+ vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, 0);
+--
+2.24.1
+
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.0.bb b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.0.bb
index d2b11c103a..e91ccc6c57 100644
--- a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.0.bb
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.0.bb
@@ -8,6 +8,9 @@ DEPENDS = "libdrm mesa libepoxy"
SRCREV = "48cc96c9aebb9d0164830a157efc8916f08f00c0"
SRC_URI = "git://anongit.freedesktop.org/virglrenderer \
file://0001-gallium-Expand-libc-check-to-be-platform-OS-check.patch \
+ file://CVE-2019-18390.patch \
+ file://CVE-2019-18391.patch \
+ file://CVE-2020-8002.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/waffle/waffle_1.6.0.bb b/meta/recipes-graphics/waffle/waffle_1.6.0.bb
index 8a1d5748f6..82cead9ad1 100644
--- a/meta/recipes-graphics/waffle/waffle_1.6.0.bb
+++ b/meta/recipes-graphics/waffle/waffle_1.6.0.bb
@@ -35,3 +35,8 @@ PACKAGECONFIG[x11-egl] = "-Dx11_egl=enabled,-Dx11_egl=disabled,virtual/${MLPREFI
PACKAGECONFIG[surfaceless-egl] = "-Dsurfaceless_egl=enabled,-Dsurfaceless_egl=disabled,virtual/${MLPREFIX}libgl"
# TODO: optionally build manpages and examples
+
+# Unset these to stop python trying to report the target Python setup
+_PYTHON_SYSCONFIGDATA_NAME[unexport] = "1"
+STAGING_INCDIR[unexport] = "1"
+STAGING_LIBDIR[unexport] = "1"
diff --git a/meta/recipes-graphics/wayland/libinput/determinism.patch b/meta/recipes-graphics/wayland/libinput/determinism.patch
new file mode 100644
index 0000000000..cb554030cf
--- /dev/null
+++ b/meta/recipes-graphics/wayland/libinput/determinism.patch
@@ -0,0 +1,21 @@
+This finds our outer git tree and that version information breaks
+determinism of this recipe. Disable it.
+
+RP 2020/2/6
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+Index: libinput-1.14.3/meson.build
+===================================================================
+--- libinput-1.14.3.orig/meson.build
++++ libinput-1.14.3/meson.build
+@@ -387,7 +387,7 @@ pkgconfig.generate(
+ libraries : lib_libinput
+ )
+
+-git_version_h = vcs_tag(command : ['git', 'describe'],
++git_version_h = vcs_tag(command : ['false'],
+ fallback : 'unknown',
+ input : 'src/libinput-git-version.h.in',
+ output :'libinput-git-version.h')
diff --git a/meta/recipes-graphics/wayland/libinput_1.14.1.bb b/meta/recipes-graphics/wayland/libinput_1.14.1.bb
index 38bc8d2c33..2c5733f33a 100644
--- a/meta/recipes-graphics/wayland/libinput_1.14.1.bb
+++ b/meta/recipes-graphics/wayland/libinput_1.14.1.bb
@@ -7,7 +7,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=1f2ea9ebff3a2c6d458faf58492efb63"
DEPENDS = "libevdev udev mtdev"
-SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz"
+SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \
+ file://determinism.patch \
+"
SRC_URI[md5sum] = "da29a704dc6f7ea2d5aac754db046340"
SRC_URI[sha256sum] = "e333a3242835c019ca37d2cef8b51a87d3138eb47444119c0153dc7a8656ee70"
diff --git a/meta/recipes-graphics/x11-common/xserver-nodm-init/capability.conf b/meta/recipes-graphics/x11-common/xserver-nodm-init/capability.conf
new file mode 100644
index 0000000000..7ab7460816
--- /dev/null
+++ b/meta/recipes-graphics/x11-common/xserver-nodm-init/capability.conf
@@ -0,0 +1,2 @@
+cap_sys_admin @USER@
+none *
diff --git a/meta/recipes-graphics/x11-common/xserver-nodm-init/xserver-nodm b/meta/recipes-graphics/x11-common/xserver-nodm-init/xserver-nodm
index 6c548551b8..116bb278bc 100755
--- a/meta/recipes-graphics/x11-common/xserver-nodm-init/xserver-nodm
+++ b/meta/recipes-graphics/x11-common/xserver-nodm-init/xserver-nodm
@@ -38,6 +38,14 @@ case "$1" in
if [ -e /dev/hidraw0 ]; then
chmod o+rw /dev/hidraw*
fi
+ # Make sure that the Xorg has the cap_sys_admin capability which is
+ # needed for setting the drm master
+ if ! grep -q "^auth.*pam_cap\.so" /etc/pam.d/su; then
+ echo "auth optional pam_cap.so" >>/etc/pam.d/su
+ fi
+ if ! /usr/sbin/getcap $XSERVER | grep -q cap_sys_admin; then
+ /usr/sbin/setcap cap_sys_admin+eip $XSERVER
+ fi
fi
# Using su rather than sudo as latest 1.8.1 cause failure [YOCTO #1211]
diff --git a/meta/recipes-graphics/x11-common/xserver-nodm-init_3.0.bb b/meta/recipes-graphics/x11-common/xserver-nodm-init_3.0.bb
index a77c56445c..7f4e1e29f1 100644
--- a/meta/recipes-graphics/x11-common/xserver-nodm-init_3.0.bb
+++ b/meta/recipes-graphics/x11-common/xserver-nodm-init_3.0.bb
@@ -10,6 +10,7 @@ SRC_URI = "file://xserver-nodm \
file://gplv2-license.patch \
file://xserver-nodm.service.in \
file://xserver-nodm.conf.in \
+ file://capability.conf \
"
S = "${WORKDIR}"
@@ -19,7 +20,7 @@ PACKAGE_ARCH = "${MACHINE_ARCH}"
inherit update-rc.d systemd distro_features_check
-REQUIRED_DISTRO_FEATURES = "x11"
+REQUIRED_DISTRO_FEATURES = "x11 ${@oe.utils.conditional('ROOTLESS_X', '1', 'pam', '', d)}"
PACKAGECONFIG ??= "blank"
# dpms and screen saver will be on only if 'blank' is in PACKAGECONFIG
@@ -40,6 +41,8 @@ do_install() {
if [ "${ROOTLESS_X}" = "1" ] ; then
XUSER_HOME="/home/xuser"
XUSER="xuser"
+ install -D capability.conf ${D}${sysconfdir}/security/capability.conf
+ sed -i "s:@USER@:${XUSER}:" ${D}${sysconfdir}/security/capability.conf
else
XUSER_HOME=${ROOT_HOME}
XUSER="root"
@@ -60,7 +63,7 @@ do_install() {
fi
}
-RDEPENDS_${PN} = "xinit ${@oe.utils.conditional('ROOTLESS_X', '1', 'xuser-account', '', d)}"
+RDEPENDS_${PN} = "xinit ${@oe.utils.conditional('ROOTLESS_X', '1', 'xuser-account libcap libcap-bin', '', d)}"
INITSCRIPT_NAME = "xserver-nodm"
INITSCRIPT_PARAMS = "start 9 5 . stop 20 0 1 2 3 6 ."
diff --git a/meta/recipes-graphics/xorg-app/xorg-app-common.inc b/meta/recipes-graphics/xorg-app/xorg-app-common.inc
index 3529cb26ef..211e399cf0 100644
--- a/meta/recipes-graphics/xorg-app/xorg-app-common.inc
+++ b/meta/recipes-graphics/xorg-app/xorg-app-common.inc
@@ -12,6 +12,6 @@ INC_PR = "r8"
SRC_URI = "${XORG_MIRROR}/individual/app/${BPN}-${PV}.tar.bz2"
-inherit autotools pkgconfig distro_features_check
+inherit autotools pkgconfig distro_features_check gettext
FILES_${PN} += " ${libdir}/X11/${BPN} ${datadir}/X11/app-defaults/"
diff --git a/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb b/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb
index a39609b5da..74014ff91b 100644
--- a/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb
+++ b/meta/recipes-graphics/xorg-font/encodings_1.0.5.bb
@@ -19,3 +19,7 @@ SRC_URI[sha256sum] = "bd96e16143a044b19e87f217cf6a3763a70c561d1076aad6f6d862ec41
inherit allarch
EXTRA_OECONF += "--with-encodingsdir=${datadir}/fonts/X11/encodings"
+
+# postinst from .inc doesn't apply to this recipe
+pkg_postinst_${PN} () {
+}
diff --git a/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb b/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
index 85a48e4c58..cc45696530 100644
--- a/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
+++ b/meta/recipes-graphics/xorg-lib/libxshmfence_1.3.bb
@@ -11,6 +11,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=47e508ca280fde97906eacb77892c3ac"
DEPENDS += "virtual/libx11"
+EXTRA_OECONF += "--with-shared-memory-dir=/dev/shm"
+
BBCLASSEXTEND = "native nativesdk"
SRC_URI[md5sum] = "42dda8016943dc12aff2c03a036e0937"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch
new file mode 100644
index 0000000000..20a604869b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2020-14347.patch
@@ -0,0 +1,37 @@
+From aac28e162e5108510065ad4c323affd6deffd816 Mon Sep 17 00:00:00 2001
+From: Matthieu Herrb <matthieu@herrb.eu>
+Date: Sat, 25 Jul 2020 19:33:50 +0200
+Subject: [PATCH] fix for ZDI-11426
+
+Avoid leaking uninitialized memory to clients by zeroing the
+whole pixmap on initial allocation.
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Matthieu Herrb <matthieu@herrb.eu>
+Reviewed-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport
+CVE: CVE-2020-14347
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ dix/pixmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/dix/pixmap.c b/dix/pixmap.c
+index 1186d7dbb..5a0146bbb 100644
+--- a/dix/pixmap.c
++++ b/dix/pixmap.c
+@@ -116,7 +116,7 @@ AllocatePixmap(ScreenPtr pScreen, int pixDataSize)
+ if (pScreen->totalPixmapSize > ((size_t) - 1) - pixDataSize)
+ return NullPixmap;
+
+- pPixmap = malloc(pScreen->totalPixmapSize + pixDataSize);
++ pPixmap = calloc(1, pScreen->totalPixmapSize + pixDataSize);
+ if (!pPixmap)
+ return NullPixmap;
+
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.5.bb b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.5.bb
index 3de6d22e57..f0f15a2584 100644
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.5.bb
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.5.bb
@@ -5,6 +5,7 @@ SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.pat
file://0001-test-xtest-Initialize-array-with-braces.patch \
file://0001-compiler.h-Do-not-include-sys-io.h-on-ARM-with-glibc.patch \
file://sdksyms-no-build-path.patch \
+ file://CVE-2020-14347.patch \
"
SRC_URI[md5sum] = "c9fc7e21e11286dbedd22c00df652130"
SRC_URI[sha256sum] = "a81d8243f37e75a03d4f8c55f96d0bc25802be6ec45c3bfa5cb614c6d01bac9d"
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb b/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
index b6e0a1e9e2..93c4472316 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_4.19.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "2fbf678238302f33b3aec5a2cba829f260744f24"
-SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
+SRCREV_machine ?= "40e34fdcb540e35b1a97e8e52c11dfe52bd68b16"
+SRCREV_meta ?= "7cb520d405cd5ca8f21a333941fbc0861bbb36b0"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.19;destsuffix=${KMETA}"
-LINUX_VERSION ?= "4.19.87"
+LINUX_VERSION ?= "4.19.107"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb b/meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb
index 5391e052c5..a23a5e6f93 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_5.2.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "e2d396270864afd14f5882ce8921d8fb562f5665"
-SRCREV_meta ?= "dd6019025cbb701b9818102f267c26e87031a59b"
+SRCREV_machine ?= "78e147f949b5b18524aa7bd72f1cc8f7ae8039f8"
+SRCREV_meta ?= "bb2776d6beaae64b1a0fc902b64376f082085498"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.2;destsuffix=${KMETA}"
-LINUX_VERSION ?= "5.2.28"
+LINUX_VERSION ?= "5.2.32"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
index e2626ab4c9..76b2467ef5 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_4.19.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "4.19.87"
+LINUX_VERSION ?= "4.19.107"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine_qemuarm ?= "bd239fb802a15c2759ea456dd1f09f5e106fc88a"
-SRCREV_machine ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
+SRCREV_machine_qemuarm ?= "e2c947b59c650f2aa2f0f88d6af90f9dfb336e04"
+SRCREV_machine ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_meta ?= "7cb520d405cd5ca8f21a333941fbc0861bbb36b0"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb
index 986dd6e351..ac9904f415 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_5.2.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "5.2.28"
+LINUX_VERSION ?= "5.2.32"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine_qemuarm ?= "d79fa780eef7c3b08fcff8a44070c211afa91214"
-SRCREV_machine ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_meta ?= "dd6019025cbb701b9818102f267c26e87031a59b"
+SRCREV_machine_qemuarm ?= "e0a3a01b24070b15121e938ea19755091bf0d662"
+SRCREV_machine ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_meta ?= "bb2776d6beaae64b1a0fc902b64376f082085498"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/meta/recipes-kernel/linux/linux-yocto_4.19.bb b/meta/recipes-kernel/linux/linux-yocto_4.19.bb
index c6e482a984..6e3b00e0e5 100644
--- a/meta/recipes-kernel/linux/linux-yocto_4.19.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_4.19.bb
@@ -11,22 +11,22 @@ KBRANCH_qemux86 ?= "v4.19/standard/base"
KBRANCH_qemux86-64 ?= "v4.19/standard/base"
KBRANCH_qemumips64 ?= "v4.19/standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "19fa1657d1d82d01647c6f73a2bbf39305505294"
-SRCREV_machine_qemuarm64 ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_machine_qemumips ?= "8fb7ab96b84852ee3d9e1d9d9e7bc35e1249b653"
-SRCREV_machine_qemuppc ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_machine_qemux86 ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_machine_qemux86-64 ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_machine_qemumips64 ?= "c8a036abd7d469013dddab15a23e0d2dde1d0000"
-SRCREV_machine ?= "b44ad1b1e7c685e75b7788a026a2416edc2ee656"
-SRCREV_meta ?= "4f5d761316a9cf14605e5d0cc91b53c1b2e9dc6a"
+SRCREV_machine_qemuarm ?= "c8b87f4d12eb957d8a95442a928ef4820037bb55"
+SRCREV_machine_qemuarm64 ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_machine_qemumips ?= "94f102eaca76ffdcc3d47ea94b47486d7157c531"
+SRCREV_machine_qemuppc ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_machine_qemux86 ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_machine_qemux86-64 ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_machine_qemumips64 ?= "98288b7e79bc8130c2a889d763c9c1aa15ff4939"
+SRCREV_machine ?= "16ae5406361af8329b74580697cb738dadeb1ecb"
+SRCREV_meta ?= "7cb520d405cd5ca8f21a333941fbc0861bbb36b0"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRANCH}; \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.19;destsuffix=${KMETA} \
"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
-LINUX_VERSION ?= "4.19.87"
+LINUX_VERSION ?= "4.19.107"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
diff --git a/meta/recipes-kernel/linux/linux-yocto_5.2.bb b/meta/recipes-kernel/linux/linux-yocto_5.2.bb
index 358c0ad80a..eab142e1c6 100644
--- a/meta/recipes-kernel/linux/linux-yocto_5.2.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_5.2.bb
@@ -12,16 +12,16 @@ KBRANCH_qemux86 ?= "v5.2/standard/base"
KBRANCH_qemux86-64 ?= "v5.2/standard/base"
KBRANCH_qemumips64 ?= "v5.2/standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "ed43b791f2cca6e87928fa47556e540333385187"
-SRCREV_machine_qemuarm64 ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_machine_qemumips ?= "5d47f37ab0b7bcd5c0aaf0ecbd6d00bb8a22ddf4"
-SRCREV_machine_qemuppc ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_machine_qemuriscv64 ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_machine_qemux86 ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_machine_qemux86-64 ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_machine_qemumips64 ?= "894ee953d9c4036003f41e0800315efe3bab8492"
-SRCREV_machine ?= "992280855e88289b7e7019ee2cf9dff867c58b94"
-SRCREV_meta ?= "dd6019025cbb701b9818102f267c26e87031a59b"
+SRCREV_machine_qemuarm ?= "fdb7cd1bb5e4238e5b3d120ce9db31119ec2b5ee"
+SRCREV_machine_qemuarm64 ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_machine_qemumips ?= "eb7faee13cfce200e9add4ba1852a3fe5d8b92e6"
+SRCREV_machine_qemuppc ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_machine_qemuriscv64 ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_machine_qemux86 ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_machine_qemux86-64 ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_machine_qemumips64 ?= "8e3bfeb7e9b5aa92c5bea941d361ff5b081a2aaa"
+SRCREV_machine ?= "73b12de4c879e4569bef3b2d0ee9c783a9788b27"
+SRCREV_meta ?= "bb2776d6beaae64b1a0fc902b64376f082085498"
# remap qemuarm to qemuarma15 for the 5.2 kernel
# KMACHINE_qemuarm ?= "qemuarma15"
@@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.2;destsuffix=${KMETA}"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
-LINUX_VERSION ?= "5.2.28"
+LINUX_VERSION ?= "5.2.32"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-SUNRPC-Fix-oops-when-trace-sunrpc_task-events-in.patch b/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-SUNRPC-Fix-oops-when-trace-sunrpc_task-events-in.patch
deleted file mode 100644
index bdbc4f811e..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-SUNRPC-Fix-oops-when-trace-sunrpc_task-events-in.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From 1ff7013bcf7f068cf4371d12d758f9c0fd16a619 Mon Sep 17 00:00:00 2001
-From: Quanyang Wang <quanyang.wang@windriver.com>
-Date: Thu, 5 Dec 2019 15:35:32 +0800
-Subject: [PATCH 1/4] Fix: SUNRPC: Fix oops when trace sunrpc_task events in
- nfs client
-
-See upstream commit :
-
- commit 2ca310fc4160ed0420da65534a21ae77b24326a8
- Author: Ditang Chen <chendt.fnst@cn.fujitsu.com>
- Date: Fri, 7 Mar 2014 13:27:57 +0800
- Subject: SUNRPC: Fix oops when trace sunrpc_task events in nfs client
-
- When tracking sunrpc_task events in nfs client, the clnt pointer may be NULL.
-
- [ 139.269266] BUG: unable to handle kernel NULL pointer dereference at 0000000000000004
- [ 139.269915] IP: [<ffffffffa026f216>] ftrace_raw_event_rpc_task_running+0x86/0xf0 [sunrpc]
- [ 139.269915] PGD 1d293067 PUD 1d294067 PMD 0
- [ 139.269915] Oops: 0000 [#1] SMP
- [ 139.269915] Modules linked in: nfsv4 dns_resolver nfs lockd sunrpc fscache sg ppdev e1000
- serio_raw pcspkr parport_pc parport i2c_piix4 i2c_core microcode xfs libcrc32c sd_mod sr_mod
- cdrom ata_generic crc_t10dif crct10dif_common pata_acpi ahci libahci ata_piix libata dm_mirror
- dm_region_hash dm_log dm_mod
- [ 139.269915] CPU: 0 PID: 59 Comm: kworker/0:2 Not tainted 3.10.0-84.el7.x86_64 #1
- [ 139.269915] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
- [ 139.269915] Workqueue: rpciod rpc_async_schedule [sunrpc]
- [ 139.269915] task: ffff88001b598000 ti: ffff88001b632000 task.ti: ffff88001b632000
- [ 139.269915] RIP: 0010:[<ffffffffa026f216>] [<ffffffffa026f216>] ftrace_raw_event_rpc_task_running+0x86/0xf0 [sunrpc]
- [ 139.269915] RSP: 0018:ffff88001b633d70 EFLAGS: 00010206
- [ 139.269915] RAX: ffff88001dfc5338 RBX: ffff88001cc37a00 RCX: ffff88001dfc5334
- [ 139.269915] RDX: ffff88001dfc5338 RSI: 0000000000000000 RDI: ffff88001dfc533c
- [ 139.269915] RBP: ffff88001b633db0 R08: 000000000000002c R09: 000000000000000a
- [ 139.269915] R10: 0000000000062180 R11: 00000020759fb9dc R12: ffffffffa0292c20
- [ 139.269915] R13: ffff88001dfc5334 R14: 0000000000000000 R15: 0000000000000000
- [ 139.269915] FS: 0000000000000000(0000) GS:ffff88001fc00000(0000) knlGS:0000000000000000
- [ 139.269915] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
- [ 139.269915] CR2: 0000000000000004 CR3: 000000001d290000 CR4: 00000000000006f0
- [ 139.269915] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- [ 139.269915] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
- [ 139.269915] Stack:
- [ 139.269915] 000000001b633d98 0000000000000246 ffff88001df1dc00 ffff88001cc37a00
- [ 139.269915] ffff88001bc35e60 0000000000000000 ffff88001ffa0a48 ffff88001bc35ee0
- [ 139.269915] ffff88001b633e08 ffffffffa02704b5 0000000000010000 ffff88001cc37a70
- [ 139.269915] Call Trace:
- [ 139.269915] [<ffffffffa02704b5>] __rpc_execute+0x1d5/0x400 [sunrpc]
- [ 139.269915] [<ffffffffa0270706>] rpc_async_schedule+0x26/0x30 [sunrpc]
- [ 139.269915] [<ffffffff8107867b>] process_one_work+0x17b/0x460
- [ 139.269915] [<ffffffff8107942b>] worker_thread+0x11b/0x400
- [ 139.269915] [<ffffffff81079310>] ? rescuer_thread+0x3e0/0x3e0
- [ 139.269915] [<ffffffff8107fc80>] kthread+0xc0/0xd0
- [ 139.269915] [<ffffffff8107fbc0>] ? kthread_create_on_node+0x110/0x110
- [ 139.269915] [<ffffffff815d122c>] ret_from_fork+0x7c/0xb0
- [ 139.269915] [<ffffffff8107fbc0>] ? kthread_create_on_node+0x110/0x110
- [ 139.269915] Code: 4c 8b 45 c8 48 8d 7d d0 89 4d c4 41 89 c9 b9 28 00 00 00 e8 9d b4 e9
- e0 48 85 c0 49 89 c5 74 a2 48 89 c7 e8 9d 3f e9 e0 48 89 c2 <41> 8b 46 04 48 8b 7d d0 4c
- 89 e9 4c 89 e6 89 42 0c 0f b7 83 d4
- [ 139.269915] RIP [<ffffffffa026f216>] ftrace_raw_event_rpc_task_running+0x86/0xf0 [sunrpc]
- [ 139.269915] RSP <ffff88001b633d70>
- [ 139.269915] CR2: 0000000000000004
- [ 140.946406] ---[ end trace ba486328b98d7622 ]---
-
-Upstream-Status: Backport [https://github.com/lttng/lttng-modules/commit/2b228b503cad10bf0c5a99b42a908ca906eab5b9]
-
-Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- instrumentation/events/lttng-module/rpc.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/rpc.h b/instrumentation/events/lttng-module/rpc.h
-index 3798e8e..fb13106 100644
---- a/instrumentation/events/lttng-module/rpc.h
-+++ b/instrumentation/events/lttng-module/rpc.h
-@@ -139,7 +139,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(unsigned int, client_id, task->tk_client ? task->tk_client->cl_clid : -1)
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -208,7 +208,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(unsigned int, client_id, task->tk_client ? task->tk_client->cl_clid : -1)
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
---
-2.17.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-sunrpc-null-rpc_clnt-dereference-in-rpc_task_que.patch b/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-sunrpc-null-rpc_clnt-dereference-in-rpc_task_que.patch
deleted file mode 100644
index 03264bac68..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0002-Fix-sunrpc-null-rpc_clnt-dereference-in-rpc_task_que.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 032a74d83b263c4faead8e4c25d497fb8ea07b6e Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Thu, 12 Dec 2019 10:29:02 -0500
-Subject: [PATCH 2/4] Fix: sunrpc: null rpc_clnt dereference in rpc_task_queued
- tracepoint
-
-Based on upstream Linux commit:
-
-commit 0be283f676a1e7b208db0c992283197ef8b52158
-Author: Benjamin Coddington <bcodding@redhat.com>
-Date: Tue Jan 23 09:32:35 2018 -0500
-
- SUNRPC: Fix null rpc_clnt dereference in rpc_task_queued tracepoint
-
- Backchannel tasks will not have a reference to the rpc_clnt. Return -1 for
- cl_clid in that case.
-
- Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
- Signed-off-by: Trond Myklebust <trondmy@gmail.com>
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Upstream-Status: Backport [https://github.com/lttng/lttng-modules/commit/8f83a9103dcdf4f6b73783427fc5ded4869309d5]
-Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
----
- instrumentation/events/lttng-module/rpc.h | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/instrumentation/events/lttng-module/rpc.h b/instrumentation/events/lttng-module/rpc.h
-index fb13106..68c622c 100644
---- a/instrumentation/events/lttng-module/rpc.h
-+++ b/instrumentation/events/lttng-module/rpc.h
-@@ -176,7 +176,8 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_queued,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(unsigned int, client_id, task->tk_client ?
-+ task->tk_client->cl_clid : -1)
- ctf_integer(unsigned long, timeout, task->tk_timeout)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
---
-2.17.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0003-Fix-sunrpc-use-signed-integer-for-client-id.patch b/meta/recipes-kernel/lttng/lttng-modules/0003-Fix-sunrpc-use-signed-integer-for-client-id.patch
deleted file mode 100644
index c7529f16dd..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0003-Fix-sunrpc-use-signed-integer-for-client-id.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-From 70389e422dd3146161089d454f525367c9046ecd Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Thu, 12 Dec 2019 10:29:37 -0500
-Subject: [PATCH 3/4] Fix: sunrpc: use signed integer for client id
-
-Within include/linux/sunrpc/clnt.h:struct rpc_cltn, the cl_clid field
-is an unsigned integer, which is the type expected by the tracepoint
-signature.
-
-However, looking into net/sunrpc/clnt.c:rpc_alloc_clid(), its allocation
-considers negative signed integer as errors.
-
-Therefore, in order to properly show "-1" in the trace output (rather
-than MAX_INT) when called with a NULL task->tk_client, move to a
-signed integer as backing type for the client_id field.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Upstream-Status: Backport [https://github.com/lttng/lttng-modules/commit/cc7bb0aa52cae22255581d67841449bb8ea36fda]
-Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
----
- instrumentation/events/lttng-module/rpc.h | 19 +++++++++++--------
- 1 file changed, 11 insertions(+), 8 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/rpc.h b/instrumentation/events/lttng-module/rpc.h
-index 68c622c..2d06e55 100644
---- a/instrumentation/events/lttng-module/rpc.h
-+++ b/instrumentation/events/lttng-module/rpc.h
-@@ -18,7 +18,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, task->tk_client->cl_clid)
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -43,7 +43,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, task->tk_client->cl_clid)
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -100,7 +100,7 @@ LTTNG_TRACEPOINT_EVENT(rpc_connect_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, task->tk_client->cl_clid)
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -112,7 +112,7 @@ LTTNG_TRACEPOINT_EVENT(rpc_connect_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, task->tk_client->cl_clid)
- ctf_integer(int, status, status)
- )
- )
-@@ -139,7 +139,8 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client ? task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, task->tk_client ?
-+ task->tk_client->cl_clid : -1)
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -176,7 +177,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_queued,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client ?
-+ ctf_integer(int, client_id, task->tk_client ?
- task->tk_client->cl_clid : -1)
- ctf_integer(unsigned long, timeout, task->tk_timeout)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
-@@ -209,7 +210,8 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client ? task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, task->tk_client ?
-+ task->tk_client->cl_clid : -1)
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -246,7 +248,8 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_queued,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(unsigned int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, task->tk_client ?
-+ task->tk_client->cl_clid : -1)
- ctf_integer(unsigned long, timeout, task->tk_timeout)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
---
-2.17.1
-
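
Note: the removed 0003 backport above exists because a -1 sentinel stored in an unsigned tracepoint field is displayed as the unsigned maximum rather than as -1. A minimal standalone C sketch of that behaviour (illustrative only, not part of any patch):

#include <stdio.h>

/* Illustrative only: shows why the client_id tracepoint field moves from
 * unsigned int to int.  Storing the -1 sentinel in an unsigned 32-bit
 * field reads back as 4294967295, which is what the trace output showed
 * before the backport above. */
int main(void)
{
    unsigned int old_client_id = (unsigned int)-1; /* old field type */
    int new_client_id = -1;                        /* new field type */

    printf("unsigned client_id: %u\n", old_client_id); /* 4294967295 */
    printf("signed client_id:   %d\n", new_client_id); /* -1 */
    return 0;
}
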
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0004-sunrpc-introduce-lttng_get_clid-helper.patch b/meta/recipes-kernel/lttng/lttng-modules/0004-sunrpc-introduce-lttng_get_clid-helper.patch
deleted file mode 100644
index 4dd726cf2c..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0004-sunrpc-introduce-lttng_get_clid-helper.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From b6903d57e4c3234ec5b1c7f72e232023cdee0fab Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Thu, 12 Dec 2019 10:39:38 -0500
-Subject: [PATCH 4/4] sunrpc: introduce lttng_get_clid helper
-
-Introduce the lttng_get_clid helper to always check for NULL pointer
-when getting the client id. While not always strictly needed depending
-on the tracepoint callsite, prefer robustness of instrumentation and
-always check for NULL rather than play whack-a-mole.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Upstream-Status: Backport [https://github.com/lttng/lttng-modules/commit/1330a091a687a406513c3a326c2fc2a0dbe75536]
-Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
----
- instrumentation/events/lttng-module/rpc.h | 43 ++++++++++++++++-------
- 1 file changed, 31 insertions(+), 12 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/rpc.h b/instrumentation/events/lttng-module/rpc.h
-index 2d06e55..ceaf9db 100644
---- a/instrumentation/events/lttng-module/rpc.h
-+++ b/instrumentation/events/lttng-module/rpc.h
-@@ -9,6 +9,29 @@
- #include <linux/sunrpc/sched.h>
- #include <linux/sunrpc/clnt.h>
-
-+#ifndef ONCE_LTTNG_RPC_H
-+#define ONCE_LTTNG_RPC_H
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
-+static inline
-+int lttng_get_clid(const struct rpc_task *task)
-+{
-+ struct rpc_clnt *tk_client;
-+
-+ tk_client = task->tk_client;
-+ if (!tk_client)
-+ return -1;
-+ /*
-+ * The cl_clid field is always initialized to positive signed
-+ * integers. Negative signed integer values are treated as
-+ * errors.
-+ */
-+ return (int) tk_client->cl_clid;
-+}
-+#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
-+
-+#endif /* ONCE_LTTNG_RPC_H */
-+
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0))
- LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_status,
-
-@@ -18,7 +41,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -43,7 +66,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -100,7 +123,7 @@ LTTNG_TRACEPOINT_EVENT(rpc_connect_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(int, status, task->tk_status)
- )
- )
-@@ -112,7 +135,7 @@ LTTNG_TRACEPOINT_EVENT(rpc_connect_status,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client->cl_clid)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(int, status, status)
- )
- )
-@@ -139,8 +162,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client ?
-- task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -177,8 +199,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_queued,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client ?
-- task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(unsigned long, timeout, task->tk_timeout)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -210,8 +231,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_running,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client ?
-- task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer_hex(const void *, action, action)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
-@@ -248,8 +268,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(rpc_task_queued,
-
- TP_FIELDS(
- ctf_integer(unsigned int, task_id, task->tk_pid)
-- ctf_integer(int, client_id, task->tk_client ?
-- task->tk_client->cl_clid : -1)
-+ ctf_integer(int, client_id, lttng_get_clid(task))
- ctf_integer(unsigned long, timeout, task->tk_timeout)
- ctf_integer(unsigned long, runstate, task->tk_runstate)
- ctf_integer(int, status, task->tk_status)
---
-2.17.1
-
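
Note: the removed 0004 backport above replaces the open-coded "task->tk_client ? ... : -1" checks with a single lttng_get_clid() helper; both backports are dropped as part of the 2.10.14 upgrade below. A simplified standalone C sketch of the same NULL-guarded accessor pattern (the struct definitions are stand-ins, not the kernel's):

#include <stdio.h>

/* Simplified stand-ins for the kernel's rpc_clnt / rpc_task structures,
 * used only to illustrate the guard pattern the helper centralizes. */
struct rpc_clnt { unsigned int cl_clid; };
struct rpc_task { struct rpc_clnt *tk_client; };

/* Return the client id, or -1 when no client is attached, so callers
 * never dereference a NULL tk_client and the trace shows -1 cleanly. */
static int get_clid(const struct rpc_task *task)
{
    if (!task->tk_client)
        return -1;
    return (int)task->tk_client->cl_clid;
}

int main(void)
{
    struct rpc_clnt clnt = { .cl_clid = 42 };
    struct rpc_task with_client = { .tk_client = &clnt };
    struct rpc_task without_client = { .tk_client = NULL };

    printf("with client:    %d\n", get_clid(&with_client));    /* 42 */
    printf("without client: %d\n", get_clid(&without_client)); /* -1 */
    return 0;
}
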
diff --git a/meta/recipes-kernel/lttng/lttng-modules_2.10.11.bb b/meta/recipes-kernel/lttng/lttng-modules_2.10.14.bb
index cc4f44519a..1c24e94902 100644
--- a/meta/recipes-kernel/lttng/lttng-modules_2.10.11.bb
+++ b/meta/recipes-kernel/lttng/lttng-modules_2.10.14.bb
@@ -14,14 +14,10 @@ COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux'
SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \
file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \
file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \
- file://0001-Fix-SUNRPC-Fix-oops-when-trace-sunrpc_task-events-in.patch \
- file://0002-Fix-sunrpc-null-rpc_clnt-dereference-in-rpc_task_que.patch \
- file://0003-Fix-sunrpc-use-signed-integer-for-client-id.patch \
- file://0004-sunrpc-introduce-lttng_get_clid-helper.patch \
"
-SRC_URI[md5sum] = "c618fb646514dfc1bf910cfd7cda4256"
-SRC_URI[sha256sum] = "7f91e39b2e8e46d8bbba2b4c8c1614f1fb380611cd1a1fccc1d1859be26112f1"
+SRC_URI[md5sum] = "3e9ed67a2da17edf93194f8a5e75a246"
+SRC_URI[sha256sum] = "d0ba614a9cac3daf8ac034837f8b786e6be2ce0242aeecef7096bed5e03b762c"
export INSTALL_MOD_DIR="kernel/lttng-modules"
@@ -44,7 +40,7 @@ SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.1
file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \
file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \
"
-SRCREV_class-devupstream = "624aca5d7507fbd11ea4a1a474c3aa1031bd9a31"
-PV_class-devupstream = "2.10.10+git${SRCPV}"
+SRCREV_class-devupstream = "b34304f146ea234ea764580d7ce1b03d05a215f9"
+PV_class-devupstream = "2.10.14+git${SRCPV}"
S_class-devupstream = "${WORKDIR}/git"
SRCREV_FORMAT ?= "lttng_git"
diff --git a/meta/recipes-kernel/perf/perf.bb b/meta/recipes-kernel/perf/perf.bb
index 8201c0cb60..904aca95de 100644
--- a/meta/recipes-kernel/perf/perf.bb
+++ b/meta/recipes-kernel/perf/perf.bb
@@ -51,7 +51,7 @@ export PYTHON_SITEPACKAGES_DIR
#kernel 3.1+ supports WERROR to disable warnings as errors
export WERROR = "0"
-do_populate_lic[depends] += "virtual/kernel:do_patch"
+do_populate_lic[depends] += "virtual/kernel:do_shared_workdir"
# needed for building the tools/perf Perl binding
include ${@bb.utils.contains('PACKAGECONFIG', 'scripting', 'perf-perl.inc', '', d)}
@@ -233,10 +233,8 @@ do_configure_prepend () {
fi
# use /usr/bin/env instead of version specific python
- for s in `find ${S}/tools/perf/ -name '*.py'`; do
- sed -i 's,/usr/bin/python,/usr/bin/env python3,' "${s}"
- sed -i 's,/usr/bin/python2,/usr/bin/env python3,' "${s}"
- sed -i 's,/usr/bin/env python2,/usr/bin/env python3,' "${s}"
+ for s in `find ${S}/tools/perf/ -name '*.py'` `find ${S}/scripts/ -name 'bpf_helpers_doc.py'`; do
+ sed -i -e "s,#!.*python.*,#!${USRBINPATH}/env python3," ${s}
done
# unistd.h can be out of sync between libc-headers and the captured version in the perf source
diff --git a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2019.06.03.bb b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb
index 9076d94601..a5827b9ef0 100644
--- a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2019.06.03.bb
+++ b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2020.04.29.bb
@@ -5,8 +5,7 @@ LICENSE = "ISC"
LIC_FILES_CHKSUM = "file://LICENSE;md5=07c4f6dea3845b02a18dc00c8c87699c"
SRC_URI = "https://www.kernel.org/pub/software/network/${BPN}/${BP}.tar.xz"
-SRC_URI[md5sum] = "4b5ba3f089db7fdb7b9daae6a7c1f2cb"
-SRC_URI[sha256sum] = "cd917ed86b63ce8d93947979f1f18948f03a4ac0ad89ec25227b36ac00dc54bf"
+SRC_URI[sha256sum] = "89fd031aed5977c219a71501e144375a10e7c90d1005d5d086ea7972886a2c7a"
inherit bin_package allarch
diff --git a/meta/recipes-multimedia/gstreamer/gst-validate_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gst-validate_1.16.2.bb
index 7d602eabc6..35492fe861 100644
--- a/meta/recipes-multimedia/gstreamer/gst-validate_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gst-validate_1.16.2.bb
@@ -9,8 +9,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=a6f89e2100d9b6cdffcea4f398e37343"
SRC_URI = "https://gstreamer.freedesktop.org/src/${BPN}/${BP}.tar.xz \
file://0001-connect-has-a-different-signature-on-musl.patch \
"
-SRC_URI[md5sum] = "793e75f4717f718ad204c554d577b160"
-SRC_URI[sha256sum] = "7f079b9b2a127604b98e297037dc8847ef50f4ce2b508aa2df0cac5b77562899"
+SRC_URI[md5sum] = "688f42c52d62e8c5e506df911553fb2c"
+SRC_URI[sha256sum] = "4861ccb9326200e74d98007e316b387d48dd49f072e0b78cb9d3303fdecfeeca"
DEPENDS = "json-glib glib-2.0 glib-2.0-native gstreamer1.0 gstreamer1.0-plugins-base"
RRECOMMENDS_${PN} = "git"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.2.bb
index 10955ff161..b57b744a80 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.16.2.bb
@@ -19,8 +19,8 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/gst-libav/gst-libav-${PV}.tar.x
file://0001-configure-check-for-armv7ve-variant.patch \
file://0001-fix-host-contamination.patch \
"
-SRC_URI[md5sum] = "58023f4c71bbd711061e350fcd76c09d"
-SRC_URI[sha256sum] = "e8a5748ae9a4a7be9696512182ea9ffa6efe0be9b7976916548e9d4381ca61c4"
+SRC_URI[md5sum] = "eacebd0136ede3a9bd3672eeb338806b"
+SRC_URI[sha256sum] = "c724f612700c15a933c7356fbeabb0bb9571fb5538f8b1b54d4d2d94188deef2"
S = "${WORKDIR}/gst-libav-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.2.bb
index cb2f7045a8..c0acf46c22 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx_1.16.2.bb
@@ -9,8 +9,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c \
SRC_URI = "https://gstreamer.freedesktop.org/src/gst-omx/gst-omx-${PV}.tar.xz"
-SRC_URI[md5sum] = "89772e7a277fd0abfc250eaf8e4e9ce9"
-SRC_URI[sha256sum] = "cbf54121a2cba575d460833e8132265781252ce32cf5b8f9fa8753e42ab24bb2"
+SRC_URI[md5sum] = "6362786d2b6cce34de08c86b7847f782"
+SRC_URI[sha256sum] = "11ed411a2eba75610d72331eeb14ff05e2df28f4fd05cb69225a88bec6d27439"
S = "${WORKDIR}/gst-omx-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.2.bb
index 1731be8441..756b823e7d 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.16.2.bb
@@ -8,8 +8,8 @@ SRC_URI = " \
file://ensure-valid-sentinels-for-gst_structure_get-etc.patch \
file://0001-introspection.m4-prefix-pkgconfig-paths-with-PKG_CON.patch \
"
-SRC_URI[md5sum] = "24d4d30ecc67d5cbc77c0475bcea1210"
-SRC_URI[sha256sum] = "56481c95339b8985af13bac19b18bc8da7118c2a7d9440ed70e7dcd799c2adb5"
+SRC_URI[md5sum] = "ccc7404230afddec723bbdb63c89feec"
+SRC_URI[sha256sum] = "f1cb7aa2389569a5343661aae473f0a940a90b872001824bc47fa8072a041e74"
S = "${WORKDIR}/gst-plugins-bad-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.2.bb
index cb99fba5ff..95d3a3679e 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base_1.16.2.bb
@@ -18,8 +18,8 @@ SRC_URI = " \
file://0001-gstreamer-gl.pc.in-don-t-append-GL_CFLAGS-to-CFLAGS.patch \
file://link-with-libvchostif.patch \
"
-SRC_URI[md5sum] = "b5eb0651bab70bf1714f103bdd66ce47"
-SRC_URI[sha256sum] = "5c3cc489933d0597087c9bc6ba251c93693d64554bcc563539a084fa2d5fcb2b"
+SRC_URI[md5sum] = "3fdb32823535799a748c1fc14f978e2c"
+SRC_URI[sha256sum] = "b13e73e2fe74a4166552f9577c3dcb24bed077021b9c7fa600d910ec6987816a"
S = "${WORKDIR}/gst-plugins-base-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.2.bb
index 0fa7b86ffe..ea0cbddc72 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.2.bb
@@ -5,8 +5,8 @@ SRC_URI = " \
file://0001-introspection.m4-prefix-pkgconfig-paths-with-PKG_CON.patch \
"
-SRC_URI[md5sum] = "515987ee763256840a11bd8ea098f2bf"
-SRC_URI[sha256sum] = "9fbabe69018fcec707df0b71150168776040cde6c1a26bb5a82a136755fa8f1f"
+SRC_URI[md5sum] = "bd025f8f14974f94b75ac69a9d1b9c93"
+SRC_URI[sha256sum] = "40bb3bafda25c0b739c8fc36e48380fccf61c4d3f83747e97ac3f9b0171b1319"
S = "${WORKDIR}/gst-plugins-good-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.2.bb
index ecab318899..94abc33542 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-ugly_1.16.2.bb
@@ -10,8 +10,8 @@ SRC_URI = " \
https://gstreamer.freedesktop.org/src/gst-plugins-ugly/gst-plugins-ugly-${PV}.tar.xz \
file://0001-introspection.m4-prefix-pkgconfig-paths-with-PKG_CON.patch \
"
-SRC_URI[md5sum] = "668795903cb4971fba9aa89abdea8369"
-SRC_URI[sha256sum] = "4bf913b2ca5195ac3b53b5e3ade2dc7c45d2258507552ddc850c5fa425968a1d"
+SRC_URI[md5sum] = "10283ff5ef1e34d462dde77042e329bd"
+SRC_URI[sha256sum] = "5500415b865e8b62775d4742cbb9f37146a50caecfc0e7a6fc0160d3c560fbca"
S = "${WORKDIR}/gst-plugins-ugly-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins.inc b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins.inc
index bc24b05fec..92b473add6 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins.inc
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins.inc
@@ -3,7 +3,7 @@ HOMEPAGE = "http://gstreamer.freedesktop.org/"
BUGTRACKER = "https://bugzilla.gnome.org/enter_bug.cgi?product=Gstreamer"
SECTION = "multimedia"
-DEPENDS = "gstreamer1.0 glib-2.0-native"
+DEPENDS = "gstreamer1.0 glib-2.0-native make-native"
SRC_URI_append = " file://gtk-doc-tweaks.patch"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python/0001-meson.build-fix-builds-with-python-3.8.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python/0001-meson.build-fix-builds-with-python-3.8.patch
new file mode 100644
index 0000000000..053108ad50
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python/0001-meson.build-fix-builds-with-python-3.8.patch
@@ -0,0 +1,24 @@
+From 61cfd1b49dc82baf14bb36d88b6c5be7b8c3d23a Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 2 Dec 2019 18:16:41 +0100
+Subject: [PATCH] meson.build: fix builds with python 3.8
+
+Upstream-Status: Submitted [https://gitlab.freedesktop.org/gstreamer/gst-python/merge_requests/14]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ meson.build | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/meson.build b/meson.build
+index 1da81d5..3e0db38 100644
+--- a/meson.build
++++ b/meson.build
+@@ -24,7 +24,7 @@ pygobject_dep = dependency('pygobject-3.0', fallback: ['pygobject', 'pygobject_d
+
+ pymod = import('python')
+ python = pymod.find_installation(get_option('python'))
+-python_dep = python.dependency(required : true)
++python_dep = dependency('python3-embed', required : true)
+
+ python_abi_flags = python.get_variable('ABIFLAGS', '')
+ pylib_loc = get_option('libpython-dir')
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.2.bb
index 5a950f183c..989556ce8b 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-python_1.16.2.bb
@@ -5,9 +5,11 @@ SECTION = "multimedia"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=c34deae4e395ca07e725ab0076a5f740"
-SRC_URI = "https://gstreamer.freedesktop.org/src/${PNREAL}/${PNREAL}-${PV}.tar.xz"
-SRC_URI[md5sum] = "499645fbd1790c5845c02a3998dccc1b"
-SRC_URI[sha256sum] = "b469c8955126f41b8ce0bf689b7029f182cd305f422b3a8df35b780bd8347489"
+SRC_URI = "https://gstreamer.freedesktop.org/src/${PNREAL}/${PNREAL}-${PV}.tar.xz \
+ file://0001-meson.build-fix-builds-with-python-3.8.patch \
+ "
+SRC_URI[md5sum] = "6ac709767334d8d0a71cb4e016f6abeb"
+SRC_URI[sha256sum] = "208df3148d73d9f416d016564737585d8ea763d91201732d44b5fe688c6288a8"
DEPENDS = "gstreamer1.0 python3-pygobject"
RDEPENDS_${PN} += "gstreamer1.0 python3-pygobject"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.2.bb
index 45302ef4f6..b7470b0047 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-rtsp-server_1.16.2.bb
@@ -4,7 +4,7 @@ SECTION = "multimedia"
LICENSE = "LGPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=6762ed442b3822387a51c92d928ead0d"
-DEPENDS = "gstreamer1.0 gstreamer1.0-plugins-base"
+DEPENDS = "gstreamer1.0 gstreamer1.0-plugins-base make-native"
PNREAL = "gst-rtsp-server"
@@ -13,8 +13,8 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/${PNREAL}/${PNREAL}-${PV}.tar.x
file://gtk-doc-tweaks.patch \
"
-SRC_URI[md5sum] = "380d6a42e856c32fcefa508ad57129e0"
-SRC_URI[sha256sum] = "b0abacad2f86f60d63781d2b24443c5668733e8b08664bbef94124906d700144"
+SRC_URI[md5sum] = "8a998725820c771ba45be6e18bfdf73a"
+SRC_URI[sha256sum] = "de07a2837b3b04820ce68264a4909f70c221b85dbff0cede7926e9cdbb1dc26e"
S = "${WORKDIR}/${PNREAL}-${PV}"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.2.bb
index 61cf705fd8..3170218abd 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-vaapi_1.16.2.bb
@@ -13,8 +13,8 @@ SRC_URI = "https://gstreamer.freedesktop.org/src/${REALPN}/${REALPN}-${PV}.tar.x
file://0001-vaapsink-downgrade-to-marginal.patch \
"
-SRC_URI[md5sum] = "15b08f76777359d87b0b4a561db05f1f"
-SRC_URI[sha256sum] = "cb570f6f1e78cb364fbe3c4fb8751824ee9db0c942ba61b62380b9b5abb7603a"
+SRC_URI[md5sum] = "13f7cb6a64bde24e67f563377487dcce"
+SRC_URI[sha256sum] = "191de7b0ab64a85dd0875c990721e7be95518f60e2a9106beca162004ed7c601"
S = "${WORKDIR}/${REALPN}-${PV}"
DEPENDS = "libva gstreamer1.0 gstreamer1.0-plugins-base gstreamer1.0-plugins-bad"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.1.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.2.bb
index ff92f63bac..96a6ade22b 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.1.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.2.bb
@@ -6,7 +6,7 @@ BUGTRACKER = "https://bugzilla.gnome.org/enter_bug.cgi?product=Gstreamer"
SECTION = "multimedia"
LICENSE = "LGPLv2+"
-DEPENDS = "glib-2.0 glib-2.0-native libcap libxml2 bison-native flex-native"
+DEPENDS = "glib-2.0 glib-2.0-native libcap libxml2 bison-native flex-native make-native"
inherit autotools pkgconfig gettext upstream-version-is-even gobject-introspection gtk-doc ptest
@@ -27,8 +27,8 @@ SRC_URI = " \
file://add-a-target-to-compile-tests.patch \
file://run-ptest \
"
-SRC_URI[md5sum] = "c505fb818b36988daaa846e9e63eabe8"
-SRC_URI[sha256sum] = "02211c3447c4daa55919c5c0f43a82a6fbb51740d57fc3af0639d46f1cf4377d"
+SRC_URI[md5sum] = "0e661ed5bdf1d8996e430228d022628e"
+SRC_URI[sha256sum] = "e3f044246783fd685439647373fa13ba14f7ab0b346eadd06437092f8419e94e"
PACKAGECONFIG ??= "${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)} \
"
diff --git a/meta/recipes-sato/webkit/webkitgtk/fix-link-error.patch b/meta/recipes-sato/webkit/webkitgtk/fix-link-error.patch
new file mode 100755
index 0000000000..9696ddd691
--- /dev/null
+++ b/meta/recipes-sato/webkit/webkitgtk/fix-link-error.patch
@@ -0,0 +1,45 @@
+webkitgtk: fix an occasional link error
+
+Part of ae465a4e... Changelog is not included in the source tarball.
+
+Upstream-Status: Backport [git://git.webkit.org/WebKit.git]
+
+commit ae465a4e3b1498b6c4038fc7e596e0e3662d116f
+Author: Hironori.Fujii@sony.com <Hironori.Fujii@sony.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
+Date: Fri Jun 28 07:38:09 2019 +0000
+
+ [Win] unresolved external symbol "JSC::JSObject::didBecomePrototype(void)" referenced in function "JSC::Structure::create(...)"
+ https://bugs.webkit.org/show_bug.cgi?id=199312
+
+ Reviewed by Keith Miller.
+
+ WinCairo port, clang-cl Release builds reported a following linkage error:
+
+ > WebCore.lib(UnifiedSource-4babe430-10.cpp.obj) : error LNK2019: unresolved external symbol "public: void __cdecl JSC::JSObject::didBecomePrototype(void)" (?didBecomePrototype@JSObject@JSC@@QEAAXXZ) referenced in function "public: static class JSC::Structure * __cdecl JSC::Structure::create(class JSC::VM &,class JSC::JSGlobalObject *,class JSC::JSValue,class JSC::TypeInfo const &,struct JSC::ClassInfo const *,unsigned char,unsigned int)" (?create@Structure@JSC@@SAPEAV12@AEAVVM@2@PEAVJSGlobalObject@2@VJSValue@2@AEBVTypeInfo@2@PEBUClassInfo@2@EI@Z)
+
+ No new tests because there is no behavior change.
+
+ * Modules/indexeddb/server/SQLiteIDBBackingStore.cpp: Include <JavaScriptCore/JSCInlines.h>,
+ and do not include headers which is included by it.
+
+ git-svn-id: http://svn.webkit.org/repository/webkit/trunk@246922 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+
+[ modification of Changelog deleted ]
+
+diff --git a/Source/WebCore/Modules/indexeddb/server/SQLiteIDBBackingStore.cpp b/Source/WebCore/Modules/indexeddb/server/SQLiteIDBBackingStore.cpp
+index d1b047c..0899a9a 100644
+--- a/Source/WebCore/Modules/indexeddb/server/SQLiteIDBBackingStore.cpp
++++ b/Source/WebCore/Modules/indexeddb/server/SQLiteIDBBackingStore.cpp
+@@ -49,11 +49,8 @@
+ #include "SQLiteTransaction.h"
+ #include "ThreadSafeDataBuffer.h"
+ #include <JavaScriptCore/AuxiliaryBarrierInlines.h>
+-#include <JavaScriptCore/HeapInlines.h>
+-#include <JavaScriptCore/JSCJSValueInlines.h>
+-#include <JavaScriptCore/JSGlobalObject.h>
++#include <JavaScriptCore/JSCInlines.h>
+ #include <JavaScriptCore/StrongInlines.h>
+-#include <JavaScriptCore/StructureInlines.h>
+ #include <wtf/FileSystem.h>
+ #include <wtf/NeverDestroyed.h>
+ #include <wtf/text/StringConcatenateNumbers.h>
diff --git a/meta/recipes-sato/webkit/webkitgtk_2.24.4.bb b/meta/recipes-sato/webkit/webkitgtk_2.24.4.bb
index c090782411..1c71762945 100644
--- a/meta/recipes-sato/webkit/webkitgtk_2.24.4.bb
+++ b/meta/recipes-sato/webkit/webkitgtk_2.24.4.bb
@@ -23,6 +23,7 @@ SRC_URI = "https://www.webkitgtk.org/releases/${BPN}-${PV}.tar.xz \
file://include_array.patch \
file://narrowing.patch \
file://0001-gstreamer-add-a-missing-format-string.patch \
+ file://fix-link-error.patch \
"
SRC_URI[md5sum] = "c214963d8c0e7d83460da04a0d8dda87"
diff --git a/meta/recipes-support/aspell/aspell/CVE-2019-20433-0001.patch b/meta/recipes-support/aspell/aspell/CVE-2019-20433-0001.patch
new file mode 100644
index 0000000000..fd68461e32
--- /dev/null
+++ b/meta/recipes-support/aspell/aspell/CVE-2019-20433-0001.patch
@@ -0,0 +1,999 @@
+From de29341638833ba7717bd6b5e6850998454b044b Mon Sep 17 00:00:00 2001
+From: Kevin Atkinson <kevina@gnu.org>
+Date: Sat, 17 Aug 2019 17:06:53 -0400
+Subject: [PATCH 1/2] Don't allow null-terminated UCS-2/4 strings using the
+ original API.
+
+Detect if the encoding is UCS-2/4 and the length is -1 in affected API
+functions and refuse to convert the string. If the string ends up
+being converted somehow, abort with an error message in DecodeDirect
+and ConvDirect. To convert a null terminated string in
+Decode/ConvDirect, a negative number corresponding to the width of the
+underlying character type for the encoding is expected; for example,
+if the encoding is "ucs-2" then the size is expected to be -2.
+
+Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4
+strings when a size is provided (found by OSS-Fuzz).
+
+Also fix a bug in DecodeDirect that caused DocumentChecker to return
+the wrong offsets when working with UCS-2/4 strings.
+
+CVE: CVE-2019-20433
+Upstream-Status: Backport [https://github.com/GNUAspell/aspell/commit/de29341638833ba7717bd6b5e6850998454b044b]
+
+[SG: - adjusted context
+ - discarded test changes as test framework is not available
+ - discarded manual entry changes for features that aren't backported]
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ auto/MkSrc/CcHelper.pm | 99 ++++++++++++++++++++++++++++++++++---
+ auto/MkSrc/Create.pm | 5 +-
+ auto/MkSrc/Info.pm | 5 +-
+ auto/MkSrc/ProcCc.pm | 24 +++++----
+ auto/MkSrc/ProcImpl.pm | 57 +++++++++++++++------
+ auto/MkSrc/Read.pm | 4 +-
+ auto/mk-src.in | 44 +++++++++++++++--
+ common/convert.cpp | 39 ++++++++++++---
+ common/convert.hpp | 38 +++++++++++++-
+ common/document_checker.cpp | 17 ++++++-
+ common/document_checker.hpp | 1 +
+ common/version.cpp | 15 ++++--
+ configure.ac | 8 +++
+ manual/aspell.texi | 58 ++++++++++++++++------
+ manual/readme.texi | 70 +++++++++++++++++++++-----
+ 15 files changed, 409 insertions(+), 75 deletions(-)
+
+diff --git a/auto/MkSrc/CcHelper.pm b/auto/MkSrc/CcHelper.pm
+index f2de991..0044335 100644
+--- a/auto/MkSrc/CcHelper.pm
++++ b/auto/MkSrc/CcHelper.pm
+@@ -10,8 +10,8 @@ BEGIN {
+ use Exporter;
+ our @ISA = qw(Exporter);
+ our @EXPORT = qw(to_c_return_type c_error_cond
+- to_type_name make_desc make_func call_func
+- make_c_method call_c_method form_c_method
++ to_type_name make_desc make_func call_func get_c_func_name
++ make_c_method make_wide_macro call_c_method form_c_method
+ make_cxx_method);
+ }
+
+@@ -90,6 +90,69 @@ sub make_func ( $ \@ $ ; \% ) {
+ ')'));
+ }
+
++=item make_wide_version NAME @TYPES PARMS ; %ACCUM
++
++Creates the wide character version of the function if needed
++
++=cut
++
++sub make_wide_version ( $ \@ $ ; \% ) {
++ my ($name, $d, $p, $accum) = @_;
++ my @d = @$d;
++ shift @d;
++ return '' unless grep {$_->{type} eq 'encoded string'} @d;
++ $accum->{sys_headers}{'stddef.h'} = true;
++ $accum->{suffix}[5] = <<'---';
++
++/******************* private implemantion details *********************/
++
++#ifdef __cplusplus
++# define aspell_cast_(type, expr) (static_cast<type>(expr))
++# define aspell_cast_from_wide_(str) (static_cast<const void *>(str))
++#else
++# define aspell_cast_(type, expr) ((type)(expr))
++# define aspell_cast_from_wide_(str) ((const char *)(str))
++#endif
++---
++ my @parms = map {$_->{type} eq 'encoded string'
++ ? ($_->{name}, $_->{name}.'_size')
++ : $_->{name}} @d;
++ $name = to_lower $name;
++ $accum->{suffix}[0] = <<'---';
++/**********************************************************************/
++
++#ifdef ASPELL_ENCODE_SETTING_SECURE
++---
++ $accum->{suffix}[2] = "#endif\n";
++ my @args = map {$_->{type} eq 'encoded string'
++ ? ($_->{name}, "$_->{name}_size", '-1')
++ : $_->{name}} @d;
++ $accum->{suffix}[1] .=
++ (join '',
++ "#define $name",
++ '(', join(', ', @parms), ')',
++ "\\\n ",
++ $name, '_wide',
++ '(', join(', ', @args), ')',
++ "\n");
++ @args = map {$_->{type} eq 'encoded string'
++ ? ("aspell_cast_from_wide_($_->{name})",
++ "$_->{name}_size*aspell_cast_(int,sizeof(*($_->{name})))",
++ "sizeof(*($_->{name}))")
++ : $_->{name}} @d;
++ return (join '',
++ "\n",
++ "/* version of $name that is safe to use with (null terminated) wide characters */\n",
++ '#define ',
++ $name, '_w',
++ '(', join(', ', @parms), ')',
++ "\\\n ",
++ $name, '_wide',
++ '(', join(', ', @args), ')',
++ "\n");
++}
++
++
+ =item call_func NAME @TYPES PARMS ; %ACCUM
+
+ Return a string to call a func. Will prefix the function with return
+@@ -103,7 +166,6 @@ Parms can be any of:
+
+ sub call_func ( $ \@ $ ; \% ) {
+ my ($name, $d, $p, $accum) = @_;
+- $accum = {} unless defined $accum;
+ my @d = @$d;
+ my $func_ret = to_type_name(shift @d, {%$p,pos=>'return'}, %$accum);
+ return (join '',
+@@ -148,8 +210,14 @@ sub to_type_name ( $ $ ; \% ) {
+ my $name = $t->{name};
+ my $type = $t->{type};
+
+- return ( (to_type_name {%$d, type=>'string'}, $p, %$accum) ,
+- (to_type_name {%$d, type=>'int', name=>"$d->{name}_size"}, $p, %$accum) )
++ if ($name eq 'encoded string' && $is_cc && $pos eq 'parm') {
++ my @types = ((to_type_name {%$d, type=>($p->{wide}?'const void pointer':'string')}, $p, %$accum),
++ (to_type_name {%$d, type=>'int', name=>"$d->{name}_size"}, $p, %$accum));
++ push @types, (to_type_name {%$d, type=>'int', name=>"$d->{name}_type_width"}, $p, %$accum) if $p->{wide};
++ return @types;
++ }
++ return ( (to_type_name {%$d, type=>($p->{wide}?'const void pointer':'string')}, $p, %$accum) ,
++ (to_type_name {%$d, type=>'int', name=>"$d->{name}_size"}, $p, %$accum) )
+ if $name eq 'encoded string' && $is_cc && $pos eq 'parm';
+
+ my $str;
+@@ -174,7 +242,7 @@ sub to_type_name ( $ $ ; \% ) {
+ $str .= "String";
+ }
+ } elsif ($name eq 'encoded string') {
+- $str .= "const char *";
++ $str .= $p->{wide} ? "const void *" : "const char *";
+ } elsif ($name eq '') {
+ $str .= "void";
+ } elsif ($name eq 'bool' && $is_cc) {
+@@ -186,7 +254,7 @@ sub to_type_name ( $ $ ; \% ) {
+ if ($t->{pointer}) {
+ $accum->{types}->{$name} = $t;
+ } else {
+- $accum->{headers}->{$t->{created_in}} = true;
++ $accum->{headers}->{$t->{created_in}} = true unless $mode eq 'cc';
+ }
+ $str .= "$c_type Aspell" if $mode eq 'cc';
+ $str .= to_mixed($name);
+@@ -214,6 +282,7 @@ sub to_type_name ( $ $ ; \% ) {
+ return $str;
+ }
+
++
+ =item make_desc DESC ; LEVEL
+
+ Make a C comment out of DESC optionally indenting it LEVEL spaces.
+@@ -286,6 +355,7 @@ sub form_c_method ($ $ $ ; \% )
+ } else {
+ $func = "aspell $class $name";
+ }
++ $func .= " wide" if $p->{wide};
+ if (exists $d->{'const'}) {
+ splice @data, 1, 0, {type => "const $class", name=> $this_name};
+ } else {
+@@ -306,6 +376,21 @@ sub make_c_method ($ $ $ ; \%)
+ return &make_func(@ret);
+ }
+
++sub get_c_func_name ($ $ $)
++{
++ my @ret = &form_c_method(@_);
++ return undef unless @ret > 0;
++ return to_lower $ret[0];
++}
++
++sub make_wide_macro ($ $ $ ; \%)
++{
++ my @ret = &form_c_method(@_);
++ return undef unless @ret > 0;
++ my $str = &make_wide_version(@ret);
++ return $str;
++}
++
+ sub call_c_method ($ $ $ ; \%)
+ {
+ my @ret = &form_c_method(@_);
+diff --git a/auto/MkSrc/Create.pm b/auto/MkSrc/Create.pm
+index d39b60e..630ede5 100644
+--- a/auto/MkSrc/Create.pm
++++ b/auto/MkSrc/Create.pm
+@@ -77,8 +77,10 @@ sub create_cc_file ( % ) {
+ $file .= "#include \"aspell.h\"\n" if $p{type} eq 'cxx';
+ $file .= "#include \"settings.h\"\n" if $p{type} eq 'native_impl' && $p{name} eq 'errors';
+ $file .= "#include \"gettext.h\"\n" if $p{type} eq 'native_impl' && $p{name} eq 'errors';
++ $file .= cmap {"#include <$_>\n"} sort keys %{$accum{sys_headers}};
+ $file .= cmap {"#include \"".to_lower($_).".hpp\"\n"} sort keys %{$accum{headers}};
+- $file .= "#ifdef __cplusplus\nextern \"C\" {\n#endif\n" if $p{header} && !$p{cxx};
++ $file .= "\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n" if $p{header} && !$p{cxx};
++ $file .= join('', grep {defined $_} @{$accum{prefix}});
+ $file .= "\nnamespace $p{namespace} {\n\n" if $p{cxx};
+ if (defined $info{forward}{proc}{$p{type}}) {
+ my @types = sort {$a->{name} cmp $b->{name}} (values %{$accum{types}});
+@@ -86,6 +88,7 @@ sub create_cc_file ( % ) {
+ }
+ $file .= "\n";
+ $file .= $body;
++ $file .= join('', grep {defined $_} @{$accum{suffix}});
+ $file .= "\n\n}\n\n" if $p{cxx};
+ $file .= "#ifdef __cplusplus\n}\n#endif\n" if $p{header} && !$p{cxx};
+ $file .= "#endif /* $hm */\n" if $p{header};
+diff --git a/auto/MkSrc/Info.pm b/auto/MkSrc/Info.pm
+index c644028..ace8e21 100644
+--- a/auto/MkSrc/Info.pm
++++ b/auto/MkSrc/Info.pm
+@@ -60,6 +60,7 @@ each proc sub should take the following argv
+ the object from which it is a member of
+ no native: do not attempt to create a native implementation
+ treat as object: treat as a object rather than a pointer
++ no conv: do not converted an encoded string
+
+ The %info structure is initialized as follows:
+
+@@ -104,8 +105,8 @@ The %info structure is initialized as follows:
+ errors => {}, # possible errors
+ method => {
+ # A class method
+- options => ['desc', 'posib err', 'c func', 'const',
+- 'c only', 'c impl', 'cxx impl'],
++ options => ['desc', 'posib err', 'c func', 'const', 'no conv', 'on conv error',
++ 'c only', 'c impl', 'cxx impl', 'cc extra'],
+ groups => undef},
+ constructor => {
+ # A class constructor
+diff --git a/auto/MkSrc/ProcCc.pm b/auto/MkSrc/ProcCc.pm
+index 47c4338..98cc435 100644
+--- a/auto/MkSrc/ProcCc.pm
++++ b/auto/MkSrc/ProcCc.pm
+@@ -23,7 +23,7 @@ use MkSrc::Info;
+ sub make_c_object ( $ @ );
+
+ $info{group}{proc}{cc} = sub {
+- my ($data) = @_;
++ my ($data,@rest) = @_;
+ my $ret;
+ my $stars = (70 - length $data->{name})/2;
+ $ret .= "/";
+@@ -33,14 +33,14 @@ $info{group}{proc}{cc} = sub {
+ $ret .= "/\n";
+ foreach my $d (@{$data->{data}}) {
+ $ret .= "\n\n";
+- $ret .= $info{$d->{type}}{proc}{cc}->($d);
++ $ret .= $info{$d->{type}}{proc}{cc}->($d,@rest);
+ }
+ $ret .= "\n\n";
+ return $ret;
+ };
+
+ $info{enum}{proc}{cc} = sub {
+- my ($d) = @_;
++ my ($d,@rest) = @_;
+ my $n = "Aspell".to_mixed($d->{name});
+ return ("\n".
+ make_desc($d->{desc}).
+@@ -58,21 +58,26 @@ $info{struct}{proc}{cc} = sub {
+ };
+
+ $info{union}{proc}{cc} = sub {
+- return make_c_object "union", $_[0];
++ return make_c_object "union", @_;
+ };
+
+ $info{class}{proc}{cc} = sub {
+- my ($d) = @_;
++ my ($d,$accum) = @_;
+ my $class = $d->{name};
+ my $classname = "Aspell".to_mixed($class);
+ my $ret = "";
+ $ret .= "typedef struct $classname $classname;\n\n";
+ foreach (@{$d->{data}}) {
+- my $s = make_c_method($class, $_, {mode=>'cc'});
++ my $s = make_c_method($class, $_, {mode=>'cc'}, %$accum);
+ next unless defined $s;
+ $ret .= "\n";
+ $ret .= make_desc($_->{desc});
+- $ret .= make_c_method($class, $_, {mode=>'cc'}).";\n";
++ $ret .= make_c_method($class, $_, {mode=>'cc'}, %$accum).";\n";
++ if (grep {$_->{type} eq 'encoded string'} @{$_->{data}}) {
++ $ret .= make_c_method($class, $_, {mode=>'cc', wide=>true}, %$accum).";\n";
++ $ret .= make_wide_macro($class, $_, {mode=>'cc'}, %$accum);
++ }
++ $ret .= "\n".$_->{'cc extra'}."\n" if defined $_->{'cc extra'};
+ }
+ $ret .= "\n";
+ return $ret;
+@@ -105,7 +110,8 @@ $info{errors}{proc}{cc} = sub {
+ };
+
+ sub make_c_object ( $ @ ) {
+- my ($t, $d) = @_;
++ my ($t, $d, $accum) = @_;
++ $accum = {} unless defined $accum;
+ my $struct;
+ $struct .= "Aspell";
+ $struct .= to_mixed($d->{name});
+@@ -120,7 +126,7 @@ sub make_c_object ( $ @ ) {
+ "\n};\n"),
+ "typedef $t $struct $struct;",
+ join ("\n",
+- map {make_c_method($d->{name}, $_, {mode=>'cc'}).";"}
++ map {make_c_method($d->{name}, $_, {mode=>'cc'}, %$accum).";"}
+ grep {$_->{type} eq 'method'}
+ @{$d->{data}})
+ )."\n";
+diff --git a/auto/MkSrc/ProcImpl.pm b/auto/MkSrc/ProcImpl.pm
+index b8628fd..3d0f220 100644
+--- a/auto/MkSrc/ProcImpl.pm
++++ b/auto/MkSrc/ProcImpl.pm
+@@ -45,10 +45,13 @@ $info{class}{proc}{impl} = sub {
+ foreach (grep {$_ ne ''} split /\s*,\s*/, $data->{'c impl headers'}) {
+ $accum->{headers}{$_} = true;
+ }
+- foreach my $d (@{$data->{data}}) {
++ my @d = @{$data->{data}};
++ while (@d) {
++ my $d = shift @d;
++ my $need_wide = false;
+ next unless one_of $d->{type}, qw(method constructor destructor);
+ my @parms = @{$d->{data}} if exists $d->{data};
+- my $m = make_c_method $data->{name}, $d, {mode=>'cc_cxx', use_name=>true}, %$accum;
++ my $m = make_c_method $data->{name}, $d, {mode=>'cc_cxx', use_name=>true, wide=>$d->{wide}}, %$accum;
+ next unless defined $m;
+ $ret .= "extern \"C\" $m\n";
+ $ret .= "{\n";
+@@ -57,24 +60,49 @@ $info{class}{proc}{impl} = sub {
+ } else {
+ if ($d->{type} eq 'method') {
+ my $ret_type = shift @parms;
+- my $ret_native = to_type_name $ret_type, {mode=>'native_no_err', pos=>'return'}, %$accum;
++ my $ret_native = to_type_name $ret_type, {mode=>'native_no_err', pos=>'return', wide=>$d->{wide}}, %$accum;
+ my $snum = 0;
++ my $call_fun = $d->{name};
++ my @call_parms;
+ foreach (@parms) {
+ my $n = to_lower($_->{name});
+- if ($_->{type} eq 'encoded string') {
+- $accum->{headers}{'mutable string'} = true;
+- $accum->{headers}{'convert'} = true;
+- $ret .= " ths->temp_str_$snum.clear();\n";
+- $ret .= " ths->to_internal_->convert($n, ${n}_size, ths->temp_str_$snum);\n";
+- $ret .= " unsigned int s$snum = ths->temp_str_$snum.size();\n";
+- $_ = "MutableString(ths->temp_str_$snum.mstr(), s$snum)";
+- $snum++;
++ if ($_->{type} eq 'encoded string' && !exists($d->{'no conv'})) {
++ $need_wide = true unless $d->{wide};
++ die unless exists $d->{'posib err'};
++ $accum->{headers}{'mutable string'} = true;
++ $accum->{headers}{'convert'} = true;
++ my $name = get_c_func_name $data->{name}, $d, {mode=>'cc_cxx', use_name=>true, wide=>$d->{wide}};
++ $ret .= " ths->temp_str_$snum.clear();\n";
++ if ($d->{wide}) {
++ $ret .= " ${n}_size = get_correct_size(\"$name\", ths->to_internal_->in_type_width(), ${n}_size, ${n}_type_width);\n";
++ } else {
++ $ret .= " PosibErr<int> ${n}_fixed_size = get_correct_size(\"$name\", ths->to_internal_->in_type_width(), ${n}_size);\n";
++ if (exists($d->{'on conv error'})) {
++ $ret .= " if (${n}_fixed_size.get_err()) {\n";
++ $ret .= " ".$d->{'on conv error'}."\n";
++ $ret .= " } else {\n";
++ $ret .= " ${n}_size = ${n}_fixed_size;\n";
++ $ret .= " }\n";
++ } else {
++ $ret .= " ths->err_.reset(${n}_fixed_size.release_err());\n";
++ $ret .= " if (ths->err_ != 0) return ".(c_error_cond $ret_type).";\n";
++ }
++ }
++ $ret .= " ths->to_internal_->convert($n, ${n}_size, ths->temp_str_$snum);\n";
++ $ret .= " unsigned int s$snum = ths->temp_str_$snum.size();\n";
++ push @call_parms, "MutableString(ths->temp_str_$snum.mstr(), s$snum)";
++ $snum++;
++ } elsif ($_->{type} eq 'encoded string') {
++ $need_wide = true unless $d->{wide};
++ push @call_parms, $n, "${n}_size";
++ push @call_parms, "${n}_type_width" if $d->{wide};
++ $call_fun .= " wide" if $d->{wide};
+ } else {
+- $_ = $n;
++ push @call_parms, $n;
+ }
+ }
+- my $parms = '('.(join ', ', @parms).')';
+- my $exp = "ths->".to_lower($d->{name})."$parms";
++ my $parms = '('.(join ', ', @call_parms).')';
++ my $exp = "ths->".to_lower($call_fun)."$parms";
+ if (exists $d->{'posib err'}) {
+ $accum->{headers}{'posib err'} = true;
+ $ret .= " PosibErr<$ret_native> ret = $exp;\n";
+@@ -118,6 +146,7 @@ $info{class}{proc}{impl} = sub {
+ }
+ }
+ $ret .= "}\n\n";
++ unshift @d,{%$d, wide=>true} if $need_wide;
+ }
+ return $ret;
+ };
+diff --git a/auto/MkSrc/Read.pm b/auto/MkSrc/Read.pm
+index 4b3d1d0..4bf640e 100644
+--- a/auto/MkSrc/Read.pm
++++ b/auto/MkSrc/Read.pm
+@@ -88,13 +88,13 @@ sub advance ( ) {
+ $in_pod = $1 if $line =~ /^\=(\w+)/;
+ $line = '' if $in_pod;
+ $in_pod = undef if $in_pod && $in_pod eq 'cut';
+- $line =~ s/\#.*$//;
++ $line =~ s/(?<!\\)\#.*$//;
+ $line =~ s/^(\t*)//;
+ $level = $base_level + length($1);
+ $line =~ s/\s*$//;
+ ++$base_level if $line =~ s/^\{$//;
+ --$base_level if $line =~ s/^\}$//;
+- $line =~ s/\\([{}])/$1/g;
++ $line =~ s/\\([{}#\\])/$1/g;
+ } while ($line eq '');
+ #print "$level:$line\n";
+ }
+diff --git a/auto/mk-src.in b/auto/mk-src.in
+index 0e7833a..eb3353f 100644
+--- a/auto/mk-src.in
++++ b/auto/mk-src.in
+@@ -608,6 +608,7 @@ errors:
+ invalid expression
+ mesg => "%expression" is not a valid regular expression.
+ parms => expression
++
+ }
+ group: speller
+ {
+@@ -650,6 +651,7 @@ class: speller
+ posib err
+ desc => Returns 0 if it is not in the dictionary,
+ 1 if it is, or -1 on error.
++ on conv error => return 0;
+ /
+ bool
+ encoded string: word
+@@ -715,6 +717,8 @@ class: speller
+ desc => Return NULL on error.
+ The word list returned by suggest is only
+ valid until the next call to suggest.
++ on conv error =>
++ word = NULL; word_size = 0;
+ /
+ const word list
+ encoded string: word
+@@ -840,7 +844,6 @@ class: document checker
+ void
+
+ method: process
+-
+ desc => Process a string.
+ The string passed in should only be split on
+ white space characters. Furthermore, between
+@@ -849,10 +852,10 @@ class: document checker
+ in the document. Passing in strings out of
+ order, skipping strings or passing them in
+ more than once may lead to undefined results.
++ no conv
+ /
+ void
+- string: str
+- int: size
++ encoded string: str
+
+ method: next misspelling
+
+@@ -860,9 +863,23 @@ class: document checker
+ processed string. If there are no more
+ misspelled words, then token.word will be
+ NULL and token.size will be 0
++ cc extra =>
++ \#define aspell_document_checker_next_misspelling_w(type, ths) \\
++ aspell_document_checker_next_misspelling_adj(ths, sizeof(type))
+ /
+ token object
+
++ method: next misspelling adj
++ desc => internal: do not use
++ c impl =>
++ Token res = ths->next_misspelling();
++ res.offset /= type_width;
++ res.len /= type_width;
++ return res;
++ /
++ token object
++ int: type_width
++
+ method: filter
+
+ desc => Returns the underlying filter class.
+@@ -922,9 +939,30 @@ class: string enumeration
+ ths->from_internal_->append_null(ths->temp_str);
+ return ths->temp_str.data();
+ \}
++ cc extra =>
++ \#define aspell_string_enumeration_next_w(type, ths) \\
++ aspell_cast_(const type *, aspell_string_enumeration_next_wide(ths, sizeof(type)))
+ /
+ const string
+
++ method: next wide
++ c impl =>
++ const char * s = ths->next();
++ if (s == 0) {
++ return s;
++ } else if (ths->from_internal_ == 0) \{
++ assert(type_width == 1);
++ return s;
++ \} else \{
++ assert(type_width == ths->from_internal_->out_type_width());
++ ths->temp_str.clear();
++ ths->from_internal_->convert(s,-1,ths->temp_str);
++ ths->from_internal_->append_null(ths->temp_str);
++ return ths->temp_str.data();
++ \}
++ /
++ const void pointer
++ int: type_width
+ }
+ group: info
+ {
+diff --git a/common/convert.cpp b/common/convert.cpp
+index 1add95a..7ae0317 100644
+--- a/common/convert.cpp
++++ b/common/convert.cpp
+@@ -541,18 +541,25 @@ namespace acommon {
+ // Trivial Conversion
+ //
+
++ const char * unsupported_null_term_wide_string_msg =
++ "Null-terminated wide-character strings unsupported when used this way.";
++
+ template <typename Chr>
+ struct DecodeDirect : public Decode
+ {
++ DecodeDirect() {type_width = sizeof(Chr);}
+ void decode(const char * in0, int size, FilterCharVector & out) const {
+ const Chr * in = reinterpret_cast<const Chr *>(in0);
+- if (size == -1) {
++ if (size == -sizeof(Chr)) {
+ for (;*in; ++in)
+- out.append(*in);
++ out.append(*in, sizeof(Chr));
++ } else if (size <= -1) {
++ fprintf(stderr, "%s\n", unsupported_null_term_wide_string_msg);
++ abort();
+ } else {
+- const Chr * stop = reinterpret_cast<const Chr *>(in0 +size);
++ const Chr * stop = reinterpret_cast<const Chr *>(in0) + size/sizeof(Chr);
+ for (;in != stop; ++in)
+- out.append(*in);
++ out.append(*in, sizeof(Chr));
+ }
+ }
+ PosibErr<void> decode_ec(const char * in0, int size,
+@@ -565,6 +572,7 @@ namespace acommon {
+ template <typename Chr>
+ struct EncodeDirect : public Encode
+ {
++ EncodeDirect() {type_width = sizeof(Chr);}
+ void encode(const FilterChar * in, const FilterChar * stop,
+ CharVector & out) const {
+ for (; in != stop; ++in) {
+@@ -594,11 +602,15 @@ namespace acommon {
+ template <typename Chr>
+ struct ConvDirect : public DirectConv
+ {
++ ConvDirect() {type_width = sizeof(Chr);}
+ void convert(const char * in0, int size, CharVector & out) const {
+- if (size == -1) {
++ if (size == -sizeof(Chr)) {
+ const Chr * in = reinterpret_cast<const Chr *>(in0);
+ for (;*in != 0; ++in)
+ out.append(in, sizeof(Chr));
++ } else if (size <= -1) {
++ fprintf(stderr, "%s\n", unsupported_null_term_wide_string_msg);
++ abort();
+ } else {
+ out.append(in0, size);
+ }
+@@ -1121,5 +1133,20 @@ namespace acommon {
+ }
+ return 0;
+ }
+-
++
++ PosibErr<void> unsupported_null_term_wide_string_err_(const char * func) {
++ static bool reported_to_stderr = false;
++ PosibErr<void> err = make_err(other_error, unsupported_null_term_wide_string_msg);
++ if (!reported_to_stderr) {
++ CERR.printf("ERROR: %s: %s\n", func, unsupported_null_term_wide_string_msg);
++ reported_to_stderr = true;
++ }
++ return err;
++ }
++
++ void unsupported_null_term_wide_string_abort_(const char * func) {
++ CERR.printf("%s: %s\n", unsupported_null_term_wide_string_msg);
++ abort();
++ }
++
+ }
+diff --git a/common/convert.hpp b/common/convert.hpp
+index 76332ee..c948973 100644
+--- a/common/convert.hpp
++++ b/common/convert.hpp
+@@ -7,6 +7,8 @@
+ #ifndef ASPELL_CONVERT__HPP
+ #define ASPELL_CONVERT__HPP
+
++#include "settings.h"
++
+ #include "string.hpp"
+ #include "posib_err.hpp"
+ #include "char_vector.hpp"
+@@ -25,8 +27,9 @@ namespace acommon {
+ typedef const Config CacheConfig;
+ typedef const char * CacheKey;
+ String key;
++ int type_width; // type width in bytes
+ bool cache_key_eq(const char * l) const {return key == l;}
+- ConvBase() {}
++ ConvBase() : type_width(1) {}
+ private:
+ ConvBase(const ConvBase &);
+ void operator=(const ConvBase &);
+@@ -56,6 +59,8 @@ namespace acommon {
+ virtual ~Encode() {}
+ };
+ struct DirectConv { // convert directly from in_code to out_code.
++ int type_width; // type width in bytes
++ DirectConv() : type_width(1) {}
+ // should not take ownership of decode and encode.
+ // decode and encode guaranteed to stick around for the life
+ // of the object.
+@@ -126,6 +131,9 @@ namespace acommon {
+ const char * in_code() const {return decode_->key.c_str();}
+ const char * out_code() const {return encode_->key.c_str();}
+
++ int in_type_width() const {return decode_->type_width;}
++ int out_type_width() const {return encode_->type_width;}
++
+ void append_null(CharVector & out) const
+ {
+ const char nul[4] = {0,0,0,0}; // 4 should be enough
+@@ -191,6 +199,10 @@ namespace acommon {
+ }
+ }
+
++ void convert(const void * in, int size, CharVector & out) {
++ convert(static_cast<const char *>(in), size, out);
++ }
++
+ void generic_convert(const char * in, int size, CharVector & out);
+
+ };
+@@ -412,6 +424,30 @@ namespace acommon {
+ return operator()(str, str + byte_size);}
+ };
+
++#ifdef SLOPPY_NULL_TERM_STRINGS
++ static const bool sloppy_null_term_strings = true;
++#else
++ static const bool sloppy_null_term_strings = false;
++#endif
++
++ PosibErr<void> unsupported_null_term_wide_string_err_(const char * func);
++ void unsupported_null_term_wide_string_abort_(const char * func);
++
++ static inline PosibErr<int> get_correct_size(const char * func, int conv_type_width, int size) {
++ if (sloppy_null_term_strings && size <= -1)
++ return -conv_type_width;
++ if (size <= -1 && -conv_type_width != size)
++ return unsupported_null_term_wide_string_err_(func);
++ return size;
++ }
++ static inline int get_correct_size(const char * func, int conv_type_width, int size, int type_width) {
++ if ((sloppy_null_term_strings || type_width <= -1) && size <= -1)
++ return -conv_type_width;
++ if (size <= -1 && conv_type_width != type_width)
++ unsupported_null_term_wide_string_abort_(func);
++ return size;
++ }
++
+ }
+
+ #endif
+diff --git a/common/document_checker.cpp b/common/document_checker.cpp
+index 5e510c4..0ccf1cd 100644
+--- a/common/document_checker.cpp
++++ b/common/document_checker.cpp
+@@ -44,7 +44,9 @@ namespace acommon {
+ void DocumentChecker::process(const char * str, int size)
+ {
+ proc_str_.clear();
+- conv_->decode(str, size, proc_str_);
++ PosibErr<int> fixed_size = get_correct_size("aspell_document_checker_process", conv_->in_type_width(), size);
++ if (!fixed_size.has_err())
++ conv_->decode(str, fixed_size, proc_str_);
+ proc_str_.append(0);
+ FilterChar * begin = proc_str_.pbegin();
+ FilterChar * end = proc_str_.pend() - 1;
+@@ -53,6 +55,19 @@ namespace acommon {
+ tokenizer_->reset(begin, end);
+ }
+
++ void DocumentChecker::process_wide(const void * str, int size, int type_width)
++ {
++ proc_str_.clear();
++ int fixed_size = get_correct_size("aspell_document_checker_process", conv_->in_type_width(), size, type_width);
++ conv_->decode(static_cast<const char *>(str), fixed_size, proc_str_);
++ proc_str_.append(0);
++ FilterChar * begin = proc_str_.pbegin();
++ FilterChar * end = proc_str_.pend() - 1;
++ if (filter_)
++ filter_->process(begin, end);
++ tokenizer_->reset(begin, end);
++ }
++
+ Token DocumentChecker::next_misspelling()
+ {
+ bool correct;
+diff --git a/common/document_checker.hpp b/common/document_checker.hpp
+index d35bb88..11a3c73 100644
+--- a/common/document_checker.hpp
++++ b/common/document_checker.hpp
+@@ -36,6 +36,7 @@ namespace acommon {
+ PosibErr<void> setup(Tokenizer *, Speller *, Filter *);
+ void reset();
+ void process(const char * str, int size);
++ void process_wide(const void * str, int size, int type_width);
+ Token next_misspelling();
+
+ Filter * filter() {return filter_;}
+diff --git a/common/version.cpp b/common/version.cpp
+index 414d938..9e60b75 100644
+--- a/common/version.cpp
++++ b/common/version.cpp
+@@ -1,8 +1,17 @@
+ #include "settings.h"
+
+-extern "C" const char * aspell_version_string() {
+ #ifdef NDEBUG
+- return VERSION " NDEBUG";
++# define NDEBUG_STR " NDEBUG"
++#else
++# define NDEBUG_STR
++#endif
++
++#ifdef SLOPPY_NULL_TERM_STRINGS
++# define SLOPPY_STR " SLOPPY"
++#else
++# define SLOPPY_STR
+ #endif
+- return VERSION;
++
++extern "C" const char * aspell_version_string() {
++ return VERSION NDEBUG_STR SLOPPY_STR;
+ }
+diff --git a/configure.ac b/configure.ac
+index 60e3b39..a5d51e3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -73,6 +73,9 @@ AC_ARG_ENABLE(filter-version-control,
+ AC_ARG_ENABLE(32-bit-hash-fun,
+ AS_HELP_STRING([--enable-32-bit-hash-fun],[use 32-bit hash function for compiled dictionaries]))
+
++AC_ARG_ENABLE(sloppy-null-term-strings,
++ AS_HELP_STRING([--enable-sloppy-null-term-strings],[allows allow null terminated UCS-2 and UCS-4 strings]))
++
+ AC_ARG_ENABLE(pspell-compatibility,
+ AS_HELP_STRING([--disable-pspell-compatibility],[don't install pspell compatibility libraries]))
+
+@@ -141,6 +144,11 @@ then
+ AC_DEFINE(USE_32_BIT_HASH_FUN, 1, [Defined if 32-bit hash function should be used for compiled dictionaries.])
+ fi
+
++if test "$enable_sloppy_null_term_strings" = "yes"
++then
++ AC_DEFINE(SLOPPY_NULL_TERM_STRINGS, 1, [Defined if null-terminated UCS-2 and UCS-4 strings should always be allowed.])
++fi
++
+ AM_CONDITIONAL(PSPELL_COMPATIBILITY,
+ [test "$enable_pspell_compatibility" != "no"])
+ AM_CONDITIONAL(INCREMENTED_SONAME,
+diff --git a/manual/aspell.texi b/manual/aspell.texi
+index 45fa091..f400e06 100644
+--- a/manual/aspell.texi
++++ b/manual/aspell.texi
+@@ -158,7 +158,8 @@ Installing
+
+ * Generic Install Instructions::
+ * HTML Manuals and "make clean"::
+-* Curses Notes::
++* Curses Notes::
++* Upgrading from Aspell 0.60.7::
+ * Loadable Filter Notes::
+ * Upgrading from Aspell 0.50::
+ * Upgrading from Aspell .33/Pspell .12::
+@@ -2206,18 +2207,26 @@ int correct = aspell_speller_check(spell_checker, @var{word}, @var{size});
+ @end smallexample
+
+ @noindent
+-@var{word} is expected to be a @code{const char *} character
+-string. If the encoding is set to be @code{ucs-2} or
+-@code{ucs-4} @var{word} is expected to be a cast
+-from either @code{const u16int *} or @code{const u32int *}
+-respectively. @code{u16int} and @code{u32int} are generally
+-@code{unsigned short} and @code{unsigned int} respectively.
+-@var{size} is the length of the string or @code{-1} if the string
+-is null terminated. If the string is a cast from @code{const u16int
+-*} or @code{const u32int *} then @code{@i{size}} is the amount of
+-space in bytes the string takes up after being cast to @code{const
+-char *} and not the true size of the string. @code{sspell_speller_check}
+-will return @code{0} if it is not found and non-zero otherwise.
++@var{word} is expected to be a @code{const char *} character string.
++@var{size} is the length of the string or @code{-1} if the string is
++null terminated. @code{aspell_speller_check} will return @code{0} if it is not found
++and non-zero otherwise.
++
++If you are using the @code{ucs-2} or @code{ucs-4} encoding then the
++string is expected to be either a 2 or 4 byte wide integer
++(respectively) and the @code{_w} macro vesion should be used:
++
++@smallexample
++int correct = aspell_speller_check_w(spell_checker, @var{word}, @var{size});
++@end smallexample
++
++The macro will cast the string to to the correct type and convert
++@var{size} into bytes for you and then a call the special wide version of the
++function that will make sure the encoding is correct for the type
++passed in. For compatibility with older versions of Aspell the normal
++non-wide functions can still be used provided that the size of the
++string, in bytes, is also passed in. Null terminated @code{ucs-2} or
++@code{ucs-4} are no longer supported when using the non-wide functions.
+
+ If the word is not correct, then the @code{suggest} method can be used
+ to come up with likely replacements.
+@@ -2236,7 +2245,28 @@ delete_aspell_string_enumeration(elements);
+
+ Notice how @code{elements} is deleted but @code{suggestions} is not.
+ The value returned by @code{suggestions} is only valid to the next
+-call to @code{suggest}. Once a replacement is made the
++call to @code{suggest}.
++
++If you are using the @code{ucs-2} or @code{ucs-4} encoding then, in
++addition to using the @code{_w} macro for the @code{suggest} method, you
++should also use the @code{_w} macro with the @code{next} method which
++will cast the string to the correct type for you. For example, if you
++are using the @code{ucs-2} encoding and the string is a @code{const
++uint16_t *} then you should use:
++
++@smallexample
++AspellWordList * suggestions = aspell_speller_suggest_w(spell_checker,
++ @var{word}, @var{size});
++AspellStringEnumeration * elements = aspell_word_list_elements(suggestions);
++const uint16_t * word;
++while ( (word = aspell_string_enumeration_next_w(uint16_t, aspell_elements)) != NULL )
++@{
++ // add to suggestion list
++@}
++delete_aspell_string_enumeration(elements);
++@end smallexample
++
++Once a replacement is made the
+ @code{store_repl} method should be used to communicate the replacement
+ pair back to the spell checker (for the reason, @pxref{Notes on
+ Storing Replacement Pairs}). Its usage is as follows:
+diff --git a/manual/readme.texi b/manual/readme.texi
+index 669ab8e..531721f 100644
+--- a/manual/readme.texi
++++ b/manual/readme.texi
+@@ -15,15 +15,16 @@ The latest version can always be found at GNU Aspell's home page at
+ @uref{http://aspell.net}.
+
+ @menu
+-* Generic Install Instructions::
+-* HTML Manuals and "make clean"::
+-* Curses Notes::
+-* Loadable Filter Notes::
+-* Using 32-Bit Dictionaries on a 64-Bit System::
+-* Upgrading from Aspell 0.50::
+-* Upgrading from Aspell .33/Pspell .12::
+-* Upgrading from a Pre-0.50 snapshot::
+-* WIN32 Notes::
++* Generic Install Instructions::
++* HTML Manuals and "make clean"::
++* Curses Notes::
++* Upgrading from Aspell 0.60.7::
++* Loadable Filter Notes::
++* Using 32-Bit Dictionaries on a 64-Bit System::
++* Upgrading from Aspell 0.50::
++* Upgrading from Aspell .33/Pspell .12::
++* Upgrading from a Pre-0.50 snapshot::
++* WIN32 Notes::
+ @end menu
+
+ @node Generic Install Instructions
+@@ -121,17 +122,62 @@ In addition your system must also support the @code{mblen} function.
+ Although this function was defined in the ISO C89 standard (ANSI
+ X3.159-1989), not all systems have it.
+
++@node Upgrading from Aspell 0.60.7
++@appendixsec Upgrading from Aspell 0.60.7
++
++To prevent a potentially unbounded buffer over-read, Aspell no longer
++supports null-terminated UCS-2 and UCS-4 encoded strings with the
++original C API. Null-terminated 8-bit or UTF-8 encoded strings are
++still supported, as are UCS-2 and UCS-4 encoded strings when the
++length is passed in.
++
++As of Aspell 0.60.8 a function from the original API that expects an
++encoded string as a parameter will return meaningless results (or an
++error code) if the string is null terminated and the encoding is set to
++@code{ucs-2} or @code{ucs-4}. In addition, a single:
++@example
++ERROR: aspell_speller_check: Null-terminated wide-character strings unsupported when used this way.
++@end example
++will be printed to standard error the first time one of those
++functions is called.
++
++Applications that use null-terminated UCS-2/4 strings should either (1)
++use the interface intended for working with wide-characters
++(@pxref{Through the C API}); or (2) define
++@code{ASPELL_ENCODE_SETTING_SECURE} before including @code{aspell.h}.
++In the latter case it is important that the application explicitly
++sets the encoding to a known value. Defining
++@code{ASPELL_ENCODE_SETTING_SECURE} and not setting the encoding
++explicitly or allowing the user of the application to set the encoding
++could result in an unbounded buffer over-read.
++
++If it is necessary to preserve binary compatibility with older
++versions of Aspell, the easiest thing would be to determine the length
++of the UCS-2/4 string---in bytes---and pass that in. Due to an
++implementation detail, existing API functions can be made to work with
++null-terminated UCS-2/4 strings safely by passing in either @code{-2}
++or @code{-4} (corresponding to the width of the character type) as the
++size. Doing so, however, will cause a buffer over-read for unpatched
++version of Aspell. To avoid this it will be necessary to parse the
++version string to determine the correct value to use. However, no
++official support will be provided for the latter method.
++
++If the application can not be recompiled, then Aspell can be configured
++to preserve the old behavior by passing
++@option{--enable-sloppy-null-term-strings} to @command{configure}. When Aspell
++is compiled this way the version string will include the string
++@samp{ SLOPPY}.
++
+ @node Loadable Filter Notes
+ @appendixsec Loadable Filter Notes
+-
++
+ Support for being able to load additional filter modules at run-time
+ has only been verified to work on Linux platforms. If you get linker
+ errors when trying to use a filter, then it is likely that loadable
+ filter support is not working yet on your platform. Thus, in order to
+ get Aspell to work correctly you will need to avoid compiling the
+ filters as individual modules by using the
+-@option{--enable-compile-in-filters} when configuring Aspell with
+-@command{./configure}.
++@option{--enable-compile-in-filters} @command{configure} option.
+
+ @node Using 32-Bit Dictionaries on a 64-Bit System
+ @appendixsec Using 32-Bit Dictionaries on a 64-Bit System
+--
+2.17.1
+
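As a quick orientation for the manual text added in the patch above, here is a minimal, illustrative C sketch (not part of the patch) of how the wide-character calls fit together when the encoding is @code{ucs-2}. The @code{_w} names, the AspellWordList/AspellStringEnumeration types and the iteration pattern are taken from the quoted manual text; the AspellSpeller type comes from the usual Aspell C API, and the helper function, its parameters and the surrounding setup are assumptions made only for this example.

#include <aspell.h>
#include <stddef.h>
#include <stdint.h>

/* spell_checker is assumed to be an already-created AspellSpeller whose
 * encoding option has been set to "ucs-2".  size is the length of the
 * string, or -1 if it is null terminated; the _w macros convert it to
 * bytes and cast the string for us. */
static void check_and_suggest(AspellSpeller *spell_checker,
                              const uint16_t *word, int size)
{
    int correct = aspell_speller_check_w(spell_checker, word, size);

    if (!correct) {
        const AspellWordList *suggestions =
            aspell_speller_suggest_w(spell_checker, word, size);
        AspellStringEnumeration *elements =
            aspell_word_list_elements(suggestions);
        const uint16_t *s;
        while ((s = aspell_string_enumeration_next_w(uint16_t, elements)) != NULL) {
            /* add s to a suggestion list */
        }
        delete_aspell_string_enumeration(elements);
    }
}

If the non-wide functions must be kept for compatibility with older Aspell releases, the manual text above notes that the size must then be passed in bytes (for example size * sizeof(uint16_t)); null-terminated UCS-2/4 strings are no longer accepted by those functions.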
diff --git a/meta/recipes-support/aspell/aspell/CVE-2019-20433-0002.patch b/meta/recipes-support/aspell/aspell/CVE-2019-20433-0002.patch
new file mode 100644
index 0000000000..9569ddeebe
--- /dev/null
+++ b/meta/recipes-support/aspell/aspell/CVE-2019-20433-0002.patch
@@ -0,0 +1,68 @@
+From cefd447e5528b08bb0cd6656bc52b4255692cefc Mon Sep 17 00:00:00 2001
+From: Kevin Atkinson <kevina@gnu.org>
+Date: Sat, 17 Aug 2019 20:25:21 -0400
+Subject: [PATCH 2/2] Increment library version to reflect API changes.
+
+CVE: CVE-2019-20433
+Upstream-Status: Backport [https://github.com/GNUAspell/aspell/commit/cefd447e5528b08bb0cd6656bc52b4255692cefc]
+
+Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
+---
+ Makefile.am | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 7e15851..19dc044 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -94,18 +94,25 @@ libaspell_la_SOURCES =\
+
+ libaspell_la_LIBADD = $(LTLIBINTL) $(PTHREAD_LIB)
+
+-## Libtool to so name
+-## C:R:A => (C-A).(A).(R)
+-## 16:5:0 => 16.0.5
+-## 16:5:1 => 15.1.5
+-## 18:0:2 => 16.2.0
+-## 17:0:2 => 15.2.0
+-
++## The version string is current[:revision[:age]]
++##
++## Before a release that has changed the source code at all
++## increment revision.
++##
++## After merging changes that have changed the API in a backwards
++## compatible way, set revision to 0 and bump both current and age.
++##
++## Do not change the API in a backwards incompatible way.
++##
++## See "Libtool: Updating version info"
++## (https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html)
++## for more info
++##
+ if INCREMENTED_SONAME
+-libaspell_la_LDFLAGS = -version-info 18:0:2 -no-undefined
++libaspell_la_LDFLAGS = -version-info 19:0:3 -no-undefined
+ else
+ ## Use C-1:R:A
+-libaspell_la_LDFLAGS = -version-info 17:0:2 -no-undefined
++libaspell_la_LDFLAGS = -version-info 18:0:3 -no-undefined
+ endif
+
+ if PSPELL_COMPATIBILITY
+@@ -113,11 +120,7 @@ libpspell_la_SOURCES = lib/dummy.cpp
+
+ libpspell_la_LIBADD = libaspell.la
+
+-if INCREMENTED_SONAME
+-libpspell_la_LDFLAGS = -version-info 18:0:2 -no-undefined
+-else
+-libpspell_la_LDFLAGS = -version-info 17:0:2 -no-undefined
+-endif
++libpspell_la_LDFLAGS = $(libaspell_la_LDFLAGS)
+
+ endif
+
+--
+2.17.1
+
diff --git a/meta/recipes-support/aspell/aspell_0.60.7.bb b/meta/recipes-support/aspell/aspell_0.60.7.bb
index b565cb3c6e..1e104c263c 100644
--- a/meta/recipes-support/aspell/aspell_0.60.7.bb
+++ b/meta/recipes-support/aspell/aspell_0.60.7.bb
@@ -8,6 +8,8 @@ PR = "r1"
SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz \
file://0001-Fix-various-bugs-found-by-OSS-Fuze.patch \
+ file://CVE-2019-20433-0001.patch \
+ file://CVE-2019-20433-0002.patch \
"
SRC_URI[md5sum] = "8ef2252609c511cd2bb26f3a3932ef28"
SRC_URI[sha256sum] = "5ca8fc8cb0370cc6c9eb5b64c6d1bc5d57b3750dbf17887726c3407d833b70e4"
diff --git a/meta/recipes-support/attr/acl_2.2.52.bb b/meta/recipes-support/attr/acl_2.2.52.bb
index 6bc77d868d..31ec64a43d 100644
--- a/meta/recipes-support/attr/acl_2.2.52.bb
+++ b/meta/recipes-support/attr/acl_2.2.52.bb
@@ -25,6 +25,9 @@ SRC_URI[sha256sum] = "179074bb0580c06c4b4137be4c5a92a701583277967acdb5546043c787
require ea-acl.inc
+# Has issues with newer versions of make
+PARALLEL_MAKEINST = ""
+
# avoid RPATH hardcode to staging dir
do_configure_append() {
sed -i ${S}/config.status -e s,^\\\(hardcode_into_libs=\\\).*$,\\1\'no\',
diff --git a/meta/recipes-support/attr/attr_2.4.47.bb b/meta/recipes-support/attr/attr_2.4.47.bb
index fc88bef830..c3da66a0c7 100644
--- a/meta/recipes-support/attr/attr_2.4.47.bb
+++ b/meta/recipes-support/attr/attr_2.4.47.bb
@@ -12,4 +12,7 @@ SRC_URI += "file://attr-Missing-configure.ac.patch \
SRC_URI[md5sum] = "84f58dec00b60f2dc8fd1c9709291cc7"
SRC_URI[sha256sum] = "25772f653ac5b2e3ceeb89df50e4688891e21f723c460636548971652af0a859"
+# Has issues with newer versions of make
+PARALLEL_MAKEINST = ""
+
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-support/curl/curl/CVE-2019-15601.patch b/meta/recipes-support/curl/curl/CVE-2019-15601.patch
new file mode 100644
index 0000000000..7bfaae7b21
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2019-15601.patch
@@ -0,0 +1,46 @@
+Upstream-Status: Backport [https://github.com/curl/curl/commit/1b71bc532bde8621fd3260843f8197182a467ff2]
+CVE: CVE-2019-15601
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+
+From 1b71bc532bde8621fd3260843f8197182a467ff2 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 7 Nov 2019 10:13:01 +0100
+Subject: [PATCH] file: on Windows, refuse paths that start with \\
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+... as that might cause an unexpected SMB connection to a given host
+name.
+
+Reported-by: Fernando Muñoz
+CVE-2019-15601
+Bug: https://curl.haxx.se/docs/CVE-2019-15601.html
+---
+ lib/file.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/lib/file.c b/lib/file.c
+index d349cd9241..166931d7f1 100644
+--- a/lib/file.c
++++ b/lib/file.c
+@@ -136,7 +136,7 @@ static CURLcode file_connect(struct connectdata *conn, bool *done)
+ struct Curl_easy *data = conn->data;
+ char *real_path;
+ struct FILEPROTO *file = data->req.protop;
+- int fd;
++ int fd = -1;
+ #ifdef DOS_FILESYSTEM
+ size_t i;
+ char *actual_path;
+@@ -181,7 +181,9 @@ static CURLcode file_connect(struct connectdata *conn, bool *done)
+ return CURLE_URL_MALFORMAT;
+ }
+
+- fd = open_readonly(actual_path, O_RDONLY|O_BINARY);
++ if(strncmp("\\\\", actual_path, 2))
++ /* refuse to open path that starts with two backslashes */
++ fd = open_readonly(actual_path, O_RDONLY|O_BINARY);
+ file->path = actual_path;
+ #else
+ if(memchr(real_path, 0, real_path_len)) {
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8169.patch b/meta/recipes-support/curl/curl/CVE-2020-8169.patch
new file mode 100644
index 0000000000..476d86af6e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8169.patch
@@ -0,0 +1,141 @@
+From 600a8cded447cd7118ed50142c576567c0cf5158 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 14 May 2020 14:37:12 +0200
+Subject: [PATCH] url: make the updated credentials URL-encoded in the URL
+
+Found-by: Gregory Jefferis
+Reported-by: Jeroen Ooms
+Added test 1168 to verify. Bug spotted when doing a redirect.
+Bug: https://github.com/jeroen/curl/issues/224
+Closes #5400
+
+Upstream-Status: Backport
+https://github.com/curl/curl/commit/600a8cded447cd
+
+CVE: CVE-2020-8169
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ lib/url.c | 6 ++--
+ tests/data/Makefile.inc | 1 +
+ tests/data/test1168 | 78 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 83 insertions(+), 2 deletions(-)
+ create mode 100644 tests/data/test1168
+
+Index: curl-7.69.1/lib/url.c
+===================================================================
+--- curl-7.69.1.orig/lib/url.c
++++ curl-7.69.1/lib/url.c
+@@ -2776,12 +2776,14 @@ static CURLcode override_login(struct Cu
+
+ /* for updated strings, we update them in the URL */
+ if(user_changed) {
+- uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp, 0);
++ uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp,
++ CURLU_URLENCODE);
+ if(uc)
+ return Curl_uc_to_curlcode(uc);
+ }
+ if(passwd_changed) {
+- uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp, 0);
++ uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp,
++ CURLU_URLENCODE);
+ if(uc)
+ return Curl_uc_to_curlcode(uc);
+ }
+Index: curl-7.69.1/tests/data/Makefile.inc
+===================================================================
+--- curl-7.69.1.orig/tests/data/Makefile.inc
++++ curl-7.69.1/tests/data/Makefile.inc
+@@ -129,7 +129,7 @@
+ test1136 test1137 test1138 test1139 test1140 test1141 test1142 test1143 \
+ test1144 test1145 test1146 test1147 test1148 test1149 test1150 test1151 \
+ test1152 test1153 test1154 test1155 test1156 test1157 test1158 test1159 \
+-test1160 test1161 test1162 test1163 test1164 test1165 \
++test1160 test1161 test1162 test1163 test1164 test1165 test1168 \
+ test1170 test1171 test1172 test1173 test1174 \
+ \
+ test1200 test1201 test1202 test1203 test1204 test1205 test1206 test1207 \
+Index: curl-7.69.1/tests/data/test1168
+===================================================================
+--- /dev/null
++++ curl-7.69.1/tests/data/test1168
+@@ -0,0 +1,78 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++HTTP GET
++followlocation
++</keywords>
++</info>
++# Server-side
++<reply>
++<data>
++HTTP/1.1 301 This is a weirdo text message swsclose
++Date: Thu, 09 Nov 2010 14:49:00 GMT
++Server: test-server/fake
++Location: /data/11680002.txt
++Connection: close
++
++This server reply is for testing a simple Location: following
++
++</data>
++<data2>
++HTTP/1.1 200 Followed here fine swsclose
++Date: Thu, 09 Nov 2010 14:49:00 GMT
++Server: test-server/fake
++Content-Length: 52
++
++If this is received, the location following worked
++
++</data2>
++<datacheck>
++HTTP/1.1 301 This is a weirdo text message swsclose
++Date: Thu, 09 Nov 2010 14:49:00 GMT
++Server: test-server/fake
++Location: /data/11680002.txt
++Connection: close
++
++HTTP/1.1 200 Followed here fine swsclose
++Date: Thu, 09 Nov 2010 14:49:00 GMT
++Server: test-server/fake
++Content-Length: 52
++
++If this is received, the location following worked
++
++</datacheck>
++</reply>
++
++# Client-side
++<client>
++<server>
++http
++</server>
++ <name>
++HTTP redirect with credentials using # in user and password
++ </name>
++ <command>
++http://%HOSTIP:%HTTPPORT/want/1168 -L -u "catmai#d:#DZaRJYrixKE*gFY"
++</command>
++</client>
++
++# Verify data after the test has been "shot"
++<verify>
++<strip>
++^User-Agent:.*
++</strip>
++<protocol>
++GET /want/1168 HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++Authorization: Basic Y2F0bWFpI2Q6I0RaYVJKWXJpeEtFKmdGWQ==
++Accept: */*
++
++GET /data/11680002.txt HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++Authorization: Basic Y2F0bWFpI2Q6I0RaYVJKWXJpeEtFKmdGWQ==
++Accept: */*
++
++</protocol>
++</verify>
++</testcase>
diff --git a/meta/recipes-support/curl/curl/CVE-2020-8177.patch b/meta/recipes-support/curl/curl/CVE-2020-8177.patch
new file mode 100644
index 0000000000..81ec59848c
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2020-8177.patch
@@ -0,0 +1,67 @@
+From 8236aba58542c5f89f1d41ca09d84579efb05e22 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Sun, 31 May 2020 23:09:59 +0200
+Subject: [PATCH] tool_getparam: -i is not OK if -J is used
+
+Reported-by: sn on hackerone
+Bug: https://curl.haxx.se/docs/CVE-2020-8177.html
+
+Upstream-Status: Backport
+CVE: CVE-2020-8177
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/tool_cb_hdr.c | 22 ++++------------------
+ src/tool_getparam.c | 5 +++++
+ 2 files changed, 9 insertions(+), 18 deletions(-)
+
+Index: curl-7.69.1/src/tool_cb_hdr.c
+===================================================================
+--- curl-7.69.1.orig/src/tool_cb_hdr.c
++++ curl-7.69.1/src/tool_cb_hdr.c
+@@ -134,25 +134,11 @@
+ filename = parse_filename(p, len);
+ if(filename) {
+ if(outs->stream) {
+- int rc;
+- /* already opened and possibly written to */
+- if(outs->fopened)
+- fclose(outs->stream);
+- outs->stream = NULL;
++ /* indication of problem, get out! */
++ free(filename);
++ return failure;
++ }
+
+- /* rename the initial file name to the new file name */
+- rc = rename(outs->filename, filename);
+- if(rc != 0) {
+- warnf(outs->config->global, "Failed to rename %s -> %s: %s\n",
+- outs->filename, filename, strerror(errno));
+- }
+- if(outs->alloc_filename)
+- Curl_safefree(outs->filename);
+- if(rc != 0) {
+- free(filename);
+- return failure;
+- }
+- }
+ outs->is_cd_filename = TRUE;
+ outs->s_isreg = TRUE;
+ outs->fopened = FALSE;
+Index: curl-7.69.1/src/tool_getparam.c
+===================================================================
+--- curl-7.69.1.orig/src/tool_getparam.c
++++ curl-7.69.1/src/tool_getparam.c
+@@ -1807,6 +1807,11 @@ ParameterError getparameter(const char *
+ }
+ break;
+ case 'i':
++ if(config->content_disposition) {
++ warnf(global,
++ "--include and --remote-header-name cannot be combined.\n");
++ return PARAM_BAD_USE;
++ }
+ config->show_headers = toggle; /* show the headers as well in the
+ general output stream */
+ break;
diff --git a/meta/recipes-support/curl/curl_7.66.0.bb b/meta/recipes-support/curl/curl_7.66.0.bb
index d1975f2460..506ae0eade 100644
--- a/meta/recipes-support/curl/curl_7.66.0.bb
+++ b/meta/recipes-support/curl/curl_7.66.0.bb
@@ -7,6 +7,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=be5d9e1419c4363f4b32037a2d3b7ffa"
SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \
file://0001-replace-krb5-config-with-pkg-config.patch \
+ file://CVE-2019-15601.patch \
+ file://CVE-2020-8169.patch \
+ file://CVE-2020-8177.patch \
"
SRC_URI[md5sum] = "c238aa394e3aa47ca4fcb0491774149f"
diff --git a/meta/recipes-support/gnupg/gnupg_2.2.17.bb b/meta/recipes-support/gnupg/gnupg_2.2.19.bb
index 689cf8a75e..a0577d61d3 100644
--- a/meta/recipes-support/gnupg/gnupg_2.2.17.bb
+++ b/meta/recipes-support/gnupg/gnupg_2.2.19.bb
@@ -19,8 +19,8 @@ SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \
file://relocate.patch"
-SRC_URI[md5sum] = "1ba2d9b70c377f8e967742064c27a19c"
-SRC_URI[sha256sum] = "afa262868e39b651a2db4c071fba90415154243e83a830ca00516f9a807fd514"
+SRC_URI[md5sum] = "cb3b373d08ba078c325299945a7f2818"
+SRC_URI[sha256sum] = "242554c0e06f3a83c420b052f750b65ead711cc3fddddb5e7274fcdbb4e9dec0"
EXTRA_OECONF = "--disable-ldap \
--disable-ccid-driver \
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-a.patch b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-a.patch
new file mode 100644
index 0000000000..1811afc2ff
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-a.patch
@@ -0,0 +1,90 @@
+From 6e798091d057de6b7f94b9dede4c5c919ec41f89 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Tue, 2 Jun 2020 20:53:11 +0200
+Subject: [PATCH 1/3] stek: differentiate initial state from valid time window
+ of TOTP
+
+commit c2646aeee94e71cb15c90a3147cf3b5b0ca158ca from https://gitlab.com/gnutls/gnutls.git
+
+There was some confusion in the TOTP implementation in stek.c. When the
+mechanism is initialized for the first time, it records the timestamp
+but doesn't initialize the key. This removes the timestamp recording
+at the initialization phase, so the key is properly set later.
+
+Upstream-Status: Backport
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+Signed-off-by: Haiqing Bai <Haiqing.Bai@windriver.com>
+---
+ lib/stek.c | 17 +++++------------
+ tests/resume-with-previous-stek.c | 4 ++--
+ tests/tls13/prf-early.c | 8 ++++----
+ 3 files changed, 11 insertions(+), 18 deletions(-)
+
+diff --git a/lib/stek.c b/lib/stek.c
+index 2f885ce..5ab9e7d 100644
+--- a/lib/stek.c
++++ b/lib/stek.c
+@@ -323,20 +323,13 @@ int _gnutls_initialize_session_ticket_key_rotation(gnutls_session_t session, con
+ if (unlikely(session == NULL || key == NULL))
+ return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR);
+
+- if (session->key.totp.last_result == 0) {
+- int64_t t;
+- memcpy(session->key.initial_stek, key->data, key->size);
+- t = totp_next(session);
+- if (t < 0)
+- return gnutls_assert_val(t);
++ if (unlikely(session->key.totp.last_result != 0))
++ return GNUTLS_E_INVALID_REQUEST;
+
+- session->key.totp.last_result = t;
+- session->key.totp.was_rotated = 0;
+-
+- return GNUTLS_E_SUCCESS;
+- }
++ memcpy(session->key.initial_stek, key->data, key->size);
+
+- return GNUTLS_E_INVALID_REQUEST;
++ session->key.totp.was_rotated = 0;
++ return 0;
+ }
+
+ /*
+diff --git a/tests/resume-with-previous-stek.c b/tests/resume-with-previous-stek.c
+index f212b18..05c1c90 100644
+--- a/tests/resume-with-previous-stek.c
++++ b/tests/resume-with-previous-stek.c
+@@ -196,8 +196,8 @@ static void server(int fd, unsigned rounds, const char *prio)
+ serverx509cred = NULL;
+ }
+
+- if (num_stek_rotations != 2)
+- fail("STEK should be rotated exactly twice (%d)!\n", num_stek_rotations);
++ if (num_stek_rotations != 3)
++ fail("STEK should be rotated exactly three times (%d)!\n", num_stek_rotations);
+
+ if (serverx509cred)
+ gnutls_certificate_free_credentials(serverx509cred);
+diff --git a/tests/tls13/prf-early.c b/tests/tls13/prf-early.c
+index 414b1db..bc31962 100644
+--- a/tests/tls13/prf-early.c
++++ b/tests/tls13/prf-early.c
+@@ -123,10 +123,10 @@ static void dump(const char *name, const uint8_t *data, unsigned data_size)
+ } \
+ }
+
+-#define KEY_EXP_VALUE "\xc0\x1e\xc2\xa4\xb7\xb4\x04\xaa\x91\x5d\xaf\xe8\xf7\x4d\x19\xdf\xd0\xe6\x08\xd6\xb4\x3b\xcf\xca\xc9\x32\x75\x3b\xe3\x11\x19\xb1\xac\x68"
+-#define HELLO_VALUE "\x77\xdb\x10\x0b\xe8\xd0\xb9\x38\xbc\x49\xe6\xbe\xf2\x47\x2a\xcc\x6b\xea\xce\x85\x04\xd3\x9e\xd8\x06\x16\xad\xff\xcd\xbf\x4b"
+-#define CONTEXT_VALUE "\xf2\x17\x9f\xf2\x66\x56\x87\x66\xf9\x5c\x8a\xd7\x4e\x1d\x46\xee\x0e\x44\x41\x4c\xcd\xac\xcb\xc0\x31\x41\x2a\xb6\xd7\x01\x62"
+-#define NULL_CONTEXT_VALUE "\xcd\x79\x07\x93\xeb\x96\x07\x3e\xec\x78\x90\x89\xf7\x16\x42\x6d\x27\x87\x56\x7c\x7b\x60\x2b\x20\x44\xd1\xea\x0c\x89\xfb\x8b"
++#define KEY_EXP_VALUE "\xc1\x6b\x6c\xb9\x88\x33\xd5\x28\x80\xec\x27\x87\xa2\x6f\x4b\xd0\x01\x5e\x7f\xca\xd7\xd4\x8a\x3f\xe2\x48\x92\xef\x02\x14\xfb\x81\x90\x04"
++#define HELLO_VALUE "\x2a\x73\xd9\x74\x04\x4e\x0a\x5f\x41\x8a\x09\xcb\x45\x33\x1a\xec\xd3\xfc\xdc\x1b\x2c\x67\x26\xe4\x9c\xfe\x1f\xa5\x74\xf1\x4f"
++#define CONTEXT_VALUE "\x87\xf6\x88\xe3\xd7\xf2\x05\xbc\xa4\x10\xa3\x48\x9f\xf5\xcf\x97\x06\x22\x4e\xfd\x18\x32\x52\x1d\xbd\x26\xf5\x5b\x21\x20\xec"
++#define NULL_CONTEXT_VALUE "\xf9\xca\xfe\x45\x44\x96\xdb\xc5\x41\x8f\x7e\x8e\xd7\xb0\x7d\x19\x45\xaf\x09\xbc\x1e\x82\x94\xac\x55\xe5\xb9\xb4\x3b\xe8\xc0"
+
+ static int handshake_callback_called;
+
+--
+2.17.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-b.patch b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-b.patch
new file mode 100644
index 0000000000..12486e1710
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-b.patch
@@ -0,0 +1,137 @@
+From 6c7f9703e42bc5278d0a4a6f0a39d07d62123ea3 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <dueno@redhat.com>
+Date: Tue, 31 Mar 2020 06:58:48 +0200
+Subject: [PATCH 2/3] build: use valgrind client request to detect undefined
+ memory use
+
+commit 50ad8778a81f9421effa4c5a3b457f98e559b178 from https://gitlab.com/gnutls/gnutls.git
+
+This tightens the check introduced in
+ac2f71b892d13a7ab4cc39086eef179042c7e23c, by using the valgrind client
+request to explicitly mark the "uninitialized but initialization is
+needed before use" regions. With this patch and the
+fix (c01011c2d8533dbbbe754e49e256c109cb848d0d) reverted, you will see
+the following error when running dtls_hello_random_value under
+valgrind:
+
+ $ valgrind ./dtls_hello_random_value
+ testing: default
+ ==520145== Conditional jump or move depends on uninitialised value(s)
+ ==520145== at 0x4025F5: hello_callback (dtls_hello_random_value.c:90)
+ ==520145== by 0x488BF97: _gnutls_call_hook_func (handshake.c:1215)
+ ==520145== by 0x488C1AA: _gnutls_send_handshake2 (handshake.c:1332)
+ ==520145== by 0x488FC7E: send_client_hello (handshake.c:2290)
+ ==520145== by 0x48902A1: handshake_client (handshake.c:2908)
+ ==520145== by 0x48902A1: gnutls_handshake (handshake.c:2740)
+ ==520145== by 0x402CB3: client (dtls_hello_random_value.c:153)
+ ==520145== by 0x402CB3: start (dtls_hello_random_value.c:317)
+ ==520145== by 0x402EFE: doit (dtls_hello_random_value.c:331)
+ ==520145== by 0x4023D4: main (utils.c:254)
+ ==520145==
+
+Upstream-Status: Backport
+
+Signed-off-by: Daiki Ueno <dueno@redhat.com>
+Signed-off-by: Haiqing Bai <Haiqing.Bai@windriver.com>
+---
+ configure.ac | 2 ++
+ lib/handshake.c | 15 +++++++++++++++
+ lib/state.c | 21 ++++++++++++++++++---
+ 3 files changed, 35 insertions(+), 3 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 172cf42..12da283 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -233,6 +233,8 @@ AS_IF([test "$ac_cv_search___atomic_load_4" = "none required" || test "$ac_cv_se
+ dnl We use its presence to detect C11 threads
+ AC_CHECK_HEADERS([threads.h])
+
++AC_CHECK_HEADERS([valgrind/memcheck.h])
++
+ AC_ARG_ENABLE(padlock,
+ AS_HELP_STRING([--disable-padlock], [unconditionally disable padlock acceleration]),
+ use_padlock=$enableval)
+diff --git a/lib/handshake.c b/lib/handshake.c
+index 84a0e52..8d58fa4 100644
+--- a/lib/handshake.c
++++ b/lib/handshake.c
+@@ -57,6 +57,9 @@
+ #include "secrets.h"
+ #include "tls13/session_ticket.h"
+ #include "locks.h"
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++#include <valgrind/memcheck.h>
++#endif
+
+ #define TRUE 1
+ #define FALSE 0
+@@ -242,6 +245,12 @@ int _gnutls_gen_client_random(gnutls_session_t session)
+ return gnutls_assert_val(ret);
+ }
+
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++ if (RUNNING_ON_VALGRIND)
++ VALGRIND_MAKE_MEM_DEFINED(session->security_parameters.client_random,
++ GNUTLS_RANDOM_SIZE);
++#endif
++
+ return 0;
+ }
+
+@@ -320,6 +329,12 @@ int _gnutls_gen_server_random(gnutls_session_t session, int version)
+ return ret;
+ }
+
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++ if (RUNNING_ON_VALGRIND)
++ VALGRIND_MAKE_MEM_DEFINED(session->security_parameters.server_random,
++ GNUTLS_RANDOM_SIZE);
++#endif
++
+ return 0;
+ }
+
+diff --git a/lib/state.c b/lib/state.c
+index 0e1d155..98900c1 100644
+--- a/lib/state.c
++++ b/lib/state.c
+@@ -55,6 +55,9 @@
+ #include "ext/cert_types.h"
+ #include "locks.h"
+ #include "kx.h"
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++#include <valgrind/memcheck.h>
++#endif
+
+ /* to be used by supplemental data support to disable TLS1.3
+ * when supplemental data have been globally registered */
+@@ -564,10 +567,22 @@ int gnutls_init(gnutls_session_t * session, unsigned int flags)
+ UINT32_MAX;
+ }
+
+- /* everything else not initialized here is initialized
+- * as NULL or 0. This is why calloc is used.
++ /* Everything else not initialized here is initialized as NULL
++ * or 0. This is why calloc is used. However, we want to
++ * ensure that certain portions of data are initialized at
++ * runtime before being used. Mark such regions with a
++ * valgrind client request as undefined.
+ */
+-
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++ if (RUNNING_ON_VALGRIND) {
++ if (flags & GNUTLS_CLIENT)
++ VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.client_random,
++ GNUTLS_RANDOM_SIZE);
++ if (flags & GNUTLS_SERVER)
++ VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.server_random,
++ GNUTLS_RANDOM_SIZE);
++ }
++#endif
+ handshake_internal_state_clear1(*session);
+
+ #ifdef HAVE_WRITEV
+--
+2.17.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-c.patch b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-c.patch
new file mode 100644
index 0000000000..2d8efeb889
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2020-13777-c.patch
@@ -0,0 +1,68 @@
+From b34da057dc9eb01df30b436ba9cb047c21fb0151 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Tue, 2 Jun 2020 21:45:17 +0200
+Subject: [PATCH 3/3] valgrind: check if session ticket key is used without
+ initialization
+
+commit 3d7fae761e65e9d0f16d7247ee8a464d4fe002da from https://gitlab.com/gnutls/gnutls.git
+
+This adds a valgrind client request for
+session->key.session_ticket_key to make sure that it is not used
+without initialization.
+
+Upstream-Status: Backport
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+Signed-off-by: Haiqing Bai <Haiqing.Bai@windriver.com>
+---
+ lib/state.c | 5 ++++-
+ lib/stek.c | 8 ++++++++
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/lib/state.c b/lib/state.c
+index 98900c1..cabdf7d 100644
+--- a/lib/state.c
++++ b/lib/state.c
+@@ -578,9 +578,12 @@ int gnutls_init(gnutls_session_t * session, unsigned int flags)
+ if (flags & GNUTLS_CLIENT)
+ VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.client_random,
+ GNUTLS_RANDOM_SIZE);
+- if (flags & GNUTLS_SERVER)
++ if (flags & GNUTLS_SERVER) {
+ VALGRIND_MAKE_MEM_UNDEFINED((*session)->security_parameters.server_random,
+ GNUTLS_RANDOM_SIZE);
++ VALGRIND_MAKE_MEM_UNDEFINED((*session)->key.session_ticket_key,
++ TICKET_MASTER_KEY_SIZE);
++ }
+ }
+ #endif
+ handshake_internal_state_clear1(*session);
+diff --git a/lib/stek.c b/lib/stek.c
+index 5ab9e7d..316555b 100644
+--- a/lib/stek.c
++++ b/lib/stek.c
+@@ -21,6 +21,9 @@
+ */
+ #include "gnutls_int.h"
+ #include "stek.h"
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++#include <valgrind/memcheck.h>
++#endif
+
+ #define NAME_POS (0)
+ #define KEY_POS (TICKET_KEY_NAME_SIZE)
+@@ -143,6 +146,11 @@ static int rotate(gnutls_session_t session)
+ call_rotation_callback(session, key, t);
+ session->key.totp.last_result = t;
+ memcpy(session->key.session_ticket_key, key, sizeof(key));
++#ifdef HAVE_VALGRIND_MEMCHECK_H
++ if (RUNNING_ON_VALGRIND)
++ VALGRIND_MAKE_MEM_DEFINED(session->key.session_ticket_key,
++ TICKET_MASTER_KEY_SIZE);
++#endif
+
+ session->key.totp.was_rotated = 1;
+ } else if (t < 0) {
+--
+2.17.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch b/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch
new file mode 100644
index 0000000000..1702325e66
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch
@@ -0,0 +1,117 @@
+From 29ee67c205855e848a0a26e6d0e4f65b6b943e0a Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Sat, 22 Aug 2020 17:19:39 +0200
+Subject: [PATCH] handshake: reject no_renegotiation alert if handshake is
+ incomplete
+
+If the initial handshake is incomplete and the server sends a
+no_renegotiation alert, the client should treat it as a fatal error
+even if its level is warning. Otherwise the same handshake
+state (e.g., DHE parameters) is reused in the next gnutls_handshake
+call, if it is called in the loop idiom:
+
+ do {
+ ret = gnutls_handshake(session);
+ } while (ret < 0 && gnutls_error_is_fatal(ret) == 0);
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+CVE: CVE-2020-24659
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ lib/gnutls_int.h | 1 +
+ lib/handshake.c | 48 +++++++++++++-----
+ 2 files changed, 36 insertions(+), 13 deletions(-)
+
+diff --git a/lib/gnutls_int.h b/lib/gnutls_int.h
+index bb6c19713..31cec5c0c 100644
+--- a/lib/gnutls_int.h
++++ b/lib/gnutls_int.h
+@@ -1370,6 +1370,7 @@ typedef struct {
+ #define HSK_RECORD_SIZE_LIMIT_RECEIVED (1<<26) /* server: record_size_limit extension was seen but not accepted yet */
+ #define HSK_OCSP_REQUESTED (1<<27) /* server: client requested OCSP stapling */
+ #define HSK_CLIENT_OCSP_REQUESTED (1<<28) /* client: server requested OCSP stapling */
++#define HSK_SERVER_HELLO_RECEIVED (1<<29) /* client: Server Hello message has been received */
+
+ /* The hsk_flags are for use within the ongoing handshake;
+ * they are reset to zero prior to handshake start by gnutls_handshake. */
+diff --git a/lib/handshake.c b/lib/handshake.c
+index b40f84b3d..ce2d160e2 100644
+--- a/lib/handshake.c
++++ b/lib/handshake.c
+@@ -2051,6 +2051,8 @@ read_server_hello(gnutls_session_t session,
+ if (ret < 0)
+ return gnutls_assert_val(ret);
+
++ session->internals.hsk_flags |= HSK_SERVER_HELLO_RECEIVED;
++
+ return 0;
+ }
+
+@@ -2575,16 +2577,42 @@ int gnutls_rehandshake(gnutls_session_t session)
+ return 0;
+ }
+
++/* This function checks whether the error code should be treated fatal
++ * or not, and also does the necessary state transition. In
++ * particular, in the case of a rehandshake abort it resets the
++ * handshake's internal state.
++ */
+ inline static int
+ _gnutls_abort_handshake(gnutls_session_t session, int ret)
+ {
+- if (((ret == GNUTLS_E_WARNING_ALERT_RECEIVED) &&
+- (gnutls_alert_get(session) == GNUTLS_A_NO_RENEGOTIATION))
+- || ret == GNUTLS_E_GOT_APPLICATION_DATA)
+- return 0;
++ switch (ret) {
++ case GNUTLS_E_WARNING_ALERT_RECEIVED:
++ if (gnutls_alert_get(session) == GNUTLS_A_NO_RENEGOTIATION) {
++ /* The server always tolerates a "no_renegotiation" alert. */
++ if (session->security_parameters.entity == GNUTLS_SERVER) {
++ STATE = STATE0;
++ return ret;
++ }
++
++ /* The client should tolerate a "no_renegotiation" alert only if:
++ * - the initial handshake has completed, or
++ * - a Server Hello is not yet received
++ */
++ if (session->internals.initial_negotiation_completed ||
++ !(session->internals.hsk_flags & HSK_SERVER_HELLO_RECEIVED)) {
++ STATE = STATE0;
++ return ret;
++ }
+
+- /* this doesn't matter */
+- return GNUTLS_E_INTERNAL_ERROR;
++ return gnutls_assert_val(GNUTLS_E_UNEXPECTED_PACKET);
++ }
++ return ret;
++ case GNUTLS_E_GOT_APPLICATION_DATA:
++ STATE = STATE0;
++ return ret;
++ default:
++ return ret;
++ }
+ }
+
+
+@@ -2747,13 +2774,7 @@ int gnutls_handshake(gnutls_session_t session)
+ }
+
+ if (ret < 0) {
+- /* In the case of a rehandshake abort
+- * we should reset the handshake's internal state.
+- */
+- if (_gnutls_abort_handshake(session, ret) == 0)
+- STATE = STATE0;
+-
+- return ret;
++ return _gnutls_abort_handshake(session, ret);
+ }
+
+ /* clear handshake buffer */
+--
+2.17.0
+
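For context on the behaviour change described in the commit message above, the following is a minimal, illustrative client-side sketch (not part of the patch) of the retry-on-non-fatal loop idiom it quotes. The session is assumed to be a fully configured gnutls_session_t set up elsewhere (credentials, transport), and gnutls_strerror is used purely for reporting.

#include <gnutls/gnutls.h>
#include <stdio.h>

static int client_handshake(gnutls_session_t session)
{
    int ret;

    /* Retry only while the error is non-fatal, as in the idiom quoted
     * in the commit message. */
    do {
        ret = gnutls_handshake(session);
    } while (ret < 0 && gnutls_error_is_fatal(ret) == 0);

    if (ret < 0) {
        /* With the backported fix, a no_renegotiation alert received after
         * the Server Hello but before the initial handshake completes is
         * reported as a fatal error, so the loop terminates here instead of
         * retrying with stale handshake state. */
        fprintf(stderr, "handshake failed: %s\n", gnutls_strerror(ret));
    }
    return ret;
}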
diff --git a/meta/recipes-support/gnutls/gnutls/posix-shell.patch b/meta/recipes-support/gnutls/gnutls/posix-shell.patch
deleted file mode 100644
index 938e2d1e18..0000000000
--- a/meta/recipes-support/gnutls/gnutls/posix-shell.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-Don't embed the path to the build-time POSIX shell as this will be
-$TMPDIR/hosttools/bash, which is no good on the target.
-
-Instead default to /bin/sh but allow it to be set in the environment.
-
-This isn't really upstreamable but I filed a bug at
-https://gitlab.com/gnutls/gnutls/issues/807 and hope a proper fix will be
-integrated.
-
-Upstream-Status: Inappropriate
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-diff --git a/src/libopts/m4/libopts.m4 b/src/libopts/m4/libopts.m4
-index c6ad738..a62faca 100644
---- a/src/libopts/m4/libopts.m4
-+++ b/src/libopts/m4/libopts.m4
-@@ -112,21 +112,7 @@ AC_DEFUN([INVOKE_LIBOPTS_MACROS_FIRST],[
- AC_CHECK_FUNCS([mmap canonicalize_file_name snprintf strdup strchr \
- strrchr strsignal fchmod fstat chmod])
- AC_PROG_SED
-- [while :
-- do
-- POSIX_SHELL=`which bash`
-- test -x "$POSIX_SHELL" && break
-- POSIX_SHELL=`which dash`
-- test -x "$POSIX_SHELL" && break
-- POSIX_SHELL=/usr/xpg4/bin/sh
-- test -x "$POSIX_SHELL" && break
-- POSIX_SHELL=`/bin/sh -c '
-- exec 2>/dev/null
-- if ! true ; then exit 1 ; fi
-- echo /bin/sh'`
-- test -x "$POSIX_SHELL" && break
-- ]AC_MSG_ERROR([cannot locate a working POSIX shell])[
-- done]
-+ POSIX_SHELL="${POSIX_SHELL:-/bin/sh}"
- AC_DEFINE_UNQUOTED([POSIX_SHELL], ["${POSIX_SHELL}"],
- [define to a working POSIX compliant shell])
- AC_SUBST([POSIX_SHELL])
diff --git a/meta/recipes-support/gnutls/gnutls_3.6.8.bb b/meta/recipes-support/gnutls/gnutls_3.6.13.bb
index c927063f0a..2ed012f9d6 100644
--- a/meta/recipes-support/gnutls/gnutls_3.6.8.bb
+++ b/meta/recipes-support/gnutls/gnutls_3.6.13.bb
@@ -19,11 +19,14 @@ SHRT_VER = "${@d.getVar('PV').split('.')[0]}.${@d.getVar('PV').split('.')[1]}"
SRC_URI = "https://www.gnupg.org/ftp/gcrypt/gnutls/v${SHRT_VER}/gnutls-${PV}.tar.xz \
file://arm_eabi.patch \
- file://posix-shell.patch \
+ file://CVE-2020-13777-a.patch \
+ file://CVE-2020-13777-b.patch \
+ file://CVE-2020-13777-c.patch \
+ file://CVE-2020-24659.patch \
"
-SRC_URI[md5sum] = "9dcf0aa45d1a42e1b3ca5d39ec7c61a8"
-SRC_URI[sha256sum] = "aa81944e5635de981171772857e72be231a7e0f559ae0292d2737de475383e83"
+SRC_URI[md5sum] = "bb1fe696a11543433785b4fc70ca225f"
+SRC_URI[sha256sum] = "32041df447d9f4644570cf573c9f60358e865637d69b7e59d1159b7240b52f38"
inherit autotools texinfo pkgconfig gettext lib_package gtk-doc
diff --git a/meta/recipes-support/icu/icu/CVE-2020-10531.patch b/meta/recipes-support/icu/icu/CVE-2020-10531.patch
new file mode 100644
index 0000000000..56303fc0f2
--- /dev/null
+++ b/meta/recipes-support/icu/icu/CVE-2020-10531.patch
@@ -0,0 +1,122 @@
+From b7d08bc04a4296982fcef8b6b8a354a9e4e7afca Mon Sep 17 00:00:00 2001
+From: Frank Tang <ftang@chromium.org>
+Date: Sat, 1 Feb 2020 02:39:04 +0000
+Subject: [PATCH] ICU-20958 Prevent SEGV_MAPERR in append
+
+See #971
+
+Upstream-Status: Backport [https://github.com/unicode-org/icu/commit/b7d08bc04a4296982fcef8b6b8a354a9e4e7afca]
+CVE: CVE-2020-10531
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ icu4c/source/common/unistr.cpp | 6 ++-
+ icu4c/source/test/intltest/ustrtest.cpp | 62 +++++++++++++++++++++++++
+ icu4c/source/test/intltest/ustrtest.h | 1 +
+ 3 files changed, 68 insertions(+), 1 deletion(-)
+
+diff --git a/icu4c/source/common/unistr.cpp b/icu4c/source/common/unistr.cpp
+index 901bb3358ba..077b4d6ef20 100644
+--- a/icu4c/source/common/unistr.cpp
++++ b/icu4c/source/common/unistr.cpp
+@@ -1563,7 +1563,11 @@ UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLeng
+ }
+
+ int32_t oldLength = length();
+- int32_t newLength = oldLength + srcLength;
++ int32_t newLength;
++ if (uprv_add32_overflow(oldLength, srcLength, &newLength)) {
++ setToBogus();
++ return *this;
++ }
+
+ // Check for append onto ourself
+ const UChar* oldArray = getArrayStart();
+diff --git a/icu4c/source/test/intltest/ustrtest.cpp b/icu4c/source/test/intltest/ustrtest.cpp
+index b6515ea813c..ad38bdf53a3 100644
+--- a/icu4c/source/test/intltest/ustrtest.cpp
++++ b/icu4c/source/test/intltest/ustrtest.cpp
+@@ -67,6 +67,7 @@ void UnicodeStringTest::runIndexedTest( int32_t index, UBool exec, const char* &
+ TESTCASE_AUTO(TestWCharPointers);
+ TESTCASE_AUTO(TestNullPointers);
+ TESTCASE_AUTO(TestUnicodeStringInsertAppendToSelf);
++ TESTCASE_AUTO(TestLargeAppend);
+ TESTCASE_AUTO_END;
+ }
+
+@@ -2310,3 +2311,64 @@ void UnicodeStringTest::TestUnicodeStringInsertAppendToSelf() {
+ str.insert(2, sub);
+ assertEquals("", u"abbcdcde", str);
+ }
++
++void UnicodeStringTest::TestLargeAppend() {
++ if(quick) return;
++
++ IcuTestErrorCode status(*this, "TestLargeAppend");
++ // Make a large UnicodeString
++ int32_t len = 0xAFFFFFF;
++ UnicodeString str;
++ char16_t *buf = str.getBuffer(len);
++ // A fast way to set buffer to valid Unicode.
++ // 4E4E is a valid unicode character
++ uprv_memset(buf, 0x4e, len * 2);
++ str.releaseBuffer(len);
++ UnicodeString dest;
++ // Append it 16 times
++ // 0xAFFFFFF times 16 is 0xAFFFFFF0,
++ // which is greater than INT32_MAX, which is 0x7FFFFFFF.
++ int64_t total = 0;
++ for (int32_t i = 0; i < 16; i++) {
++ dest.append(str);
++ total += len;
++ if (total <= INT32_MAX) {
++ assertFalse("dest is not bogus", dest.isBogus());
++ } else {
++ assertTrue("dest should be bogus", dest.isBogus());
++ }
++ }
++ dest.remove();
++ total = 0;
++ for (int32_t i = 0; i < 16; i++) {
++ dest.append(str);
++ total += len;
++ if (total + len <= INT32_MAX) {
++ assertFalse("dest is not bogus", dest.isBogus());
++ } else if (total <= INT32_MAX) {
++ // Check that a string of exactly the maximum size works
++ UnicodeString str2;
++ int32_t remain = INT32_MAX - total;
++ char16_t *buf2 = str2.getBuffer(remain);
++ if (buf2 == nullptr) {
++ // if somehow memory allocation fail, return the test
++ return;
++ }
++ uprv_memset(buf2, 0x4e, remain * 2);
++ str2.releaseBuffer(remain);
++ dest.append(str2);
++ total += remain;
++ assertEquals("When a string of exactly the maximum size works", (int64_t)INT32_MAX, total);
++ assertEquals("When a string of exactly the maximum size works", INT32_MAX, dest.length());
++ assertFalse("dest is not bogus", dest.isBogus());
++
++ // Check that a string size+1 goes bogus
++ str2.truncate(1);
++ dest.append(str2);
++ total++;
++ assertTrue("dest should be bogus", dest.isBogus());
++ } else {
++ assertTrue("dest should be bogus", dest.isBogus());
++ }
++ }
++}
+diff --git a/icu4c/source/test/intltest/ustrtest.h b/icu4c/source/test/intltest/ustrtest.h
+index 218befdcc68..4a356a92c7a 100644
+--- a/icu4c/source/test/intltest/ustrtest.h
++++ b/icu4c/source/test/intltest/ustrtest.h
+@@ -97,6 +97,7 @@ class UnicodeStringTest: public IntlTest {
+ void TestWCharPointers();
+ void TestNullPointers();
+ void TestUnicodeStringInsertAppendToSelf();
++ void TestLargeAppend();
+ };
+
+ #endif
diff --git a/meta/recipes-support/icu/icu_64.2.bb b/meta/recipes-support/icu/icu_64.2.bb
index 10bac7aac0..d09776f4bc 100644
--- a/meta/recipes-support/icu/icu_64.2.bb
+++ b/meta/recipes-support/icu/icu_64.2.bb
@@ -6,18 +6,24 @@ def icu_download_version(d):
pvsplit = d.getVar('PV').split('.')
return pvsplit[0] + "_" + pvsplit[1]
+def icu_download_folder(d):
+ pvsplit = d.getVar('PV').split('.')
+ return pvsplit[0] + "-" + pvsplit[1]
+
ICU_PV = "${@icu_download_version(d)}"
+ICU_FOLDER = "${@icu_download_folder(d)}"
# http://errors.yoctoproject.org/Errors/Details/20486/
ARM_INSTRUCTION_SET_armv4 = "arm"
ARM_INSTRUCTION_SET_armv5 = "arm"
-BASE_SRC_URI = "http://download.icu-project.org/files/icu4c/${PV}/icu4c-${ICU_PV}-src.tgz"
+BASE_SRC_URI = "https://github.com/unicode-org/icu/releases/download/release-${ICU_FOLDER}/icu4c-${ICU_PV}-src.tgz"
SRC_URI = "${BASE_SRC_URI} \
file://icu-pkgdata-large-cmd.patch \
file://fix-install-manx.patch \
file://0001-Fix-big-endian-build.patch \
file://0001-icu-Added-armeb-support.patch \
+ file://CVE-2020-10531.patch;striplevel=3 \
"
SRC_URI_append_class-target = "\
@@ -26,5 +32,5 @@ SRC_URI_append_class-target = "\
SRC_URI[md5sum] = "a3d18213beec454e3cdec9a3116d6b05"
SRC_URI[sha256sum] = "627d5d8478e6d96fc8c90fed4851239079a561a6a8b9e48b0892f24e82d31d6c"
-UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)/"
-UPSTREAM_CHECK_URI = "http://download.icu-project.org/files/icu4c/"
+UPSTREAM_CHECK_REGEX = "icu4c-(?P<pver>\d+(_\d+)+)-src"
+UPSTREAM_CHECK_URI = "https://github.com/unicode-org/icu/releases"
diff --git a/meta/recipes-support/iso-codes/iso-codes_4.3.bb b/meta/recipes-support/iso-codes/iso-codes_4.3.bb
index 5651a96c66..566c147690 100644
--- a/meta/recipes-support/iso-codes/iso-codes_4.3.bb
+++ b/meta/recipes-support/iso-codes/iso-codes_4.3.bb
@@ -5,7 +5,7 @@ BUGTRACKER = "https://salsa.debian.org/iso-codes-team/iso-codes/issues"
LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
-SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=http"
+SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=http;branch=main;"
SRCREV = "43398a317371e309361ce43072603863cb2f57e1"
# inherit gettext cannot be used, because it adds gettext-native to BASEDEPENDS which
diff --git a/meta/recipes-support/libevdev/libevdev/determinism.patch b/meta/recipes-support/libevdev/libevdev/determinism.patch
new file mode 100644
index 0000000000..33a6076b78
--- /dev/null
+++ b/meta/recipes-support/libevdev/libevdev/determinism.patch
@@ -0,0 +1,34 @@
+The order of dict values is not deterministic leading to differing header file generation.
+Sort to remove this inconsistency.
+
+RP 2020/2/7
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+Upstream-Status: Pending
+
+Index: a/libevdev/make-event-names.py
+===================================================================
+--- a/libevdev/make-event-names.py
++++ b/libevdev/make-event-names.py
+@@ -67,10 +67,10 @@ def print_bits(bits, prefix):
+ if not hasattr(bits, prefix):
+ return
+ print("static const char * const %s_map[%s_MAX + 1] = {" % (prefix, prefix.upper()))
+- for val, name in list(getattr(bits, prefix).items()):
++ for val, name in sorted(list(getattr(bits, prefix).items())):
+ print(" [%s] = \"%s\"," % (name, name))
+ if prefix == "key":
+- for val, name in list(getattr(bits, "btn").items()):
++ for val, name in sorted(list(getattr(bits, "btn").items())):
+ print(" [%s] = \"%s\"," % (name, name))
+ print("};")
+ print("")
+@@ -111,7 +111,7 @@ def print_lookup(bits, prefix):
+ if not hasattr(bits, prefix):
+ return
+
+- names = list(getattr(bits, prefix).items())
++ names = sorted(list(getattr(bits, prefix).items()))
+ if prefix == "btn":
+ names = names + btn_additional;
+
diff --git a/meta/recipes-support/libevdev/libevdev_1.8.0.bb b/meta/recipes-support/libevdev/libevdev_1.8.0.bb
index 84274987d7..46ed5d786a 100644
--- a/meta/recipes-support/libevdev/libevdev_1.8.0.bb
+++ b/meta/recipes-support/libevdev/libevdev_1.8.0.bb
@@ -6,7 +6,8 @@ LICENSE = "MIT-X"
LIC_FILES_CHKSUM = "file://COPYING;md5=75aae0d38feea6fda97ca381cb9132eb \
file://libevdev/libevdev.h;endline=21;md5=7ff4f0b5113252c2f1a828e0bbad98d1"
-SRC_URI = "http://www.freedesktop.org/software/libevdev/${BP}.tar.xz"
+SRC_URI = "http://www.freedesktop.org/software/libevdev/${BP}.tar.xz \
+ file://determinism.patch"
SRC_URI[md5sum] = "879631080be18526737e33b63d848039"
SRC_URI[sha256sum] = "20d3cae4efd277f485abdf8f2a7c46588e539998b5a08c2c4d368218379d4211"
diff --git a/meta/recipes-support/libexif/libexif/CVE-2020-13114.patch b/meta/recipes-support/libexif/libexif/CVE-2020-13114.patch
new file mode 100644
index 0000000000..06b8b46c21
--- /dev/null
+++ b/meta/recipes-support/libexif/libexif/CVE-2020-13114.patch
@@ -0,0 +1,73 @@
+From 47f51be021f4dfd800d4ff4630659887378baa3a Mon Sep 17 00:00:00 2001
+From: Dan Fandrich <dan@coneharvesters.com>
+Date: Sat, 16 May 2020 19:32:30 +0200
+Subject: [PATCH] Add a failsafe on the maximum number of Canon MakerNote
+
+ subtags.
+
+A malicious file could be crafted to cause extremely large values in some
+tags without tripping any buffer range checks. This is bad with the libexif
+representation of Canon MakerNotes because some arrays are turned into
+individual tags that the application must loop around.
+
+The largest value I've seen for failsafe_size in a (very small) sample of valid
+Canon files is <5000. The limit is set two orders of magnitude larger to avoid
+tripping up falsely in case some models use much larger values.
+
+Patch from Google.
+
+CVE-2020-13114
+
+Upstream-Status: Backport [https://github.com/libexif/libexif/commit/e6a38a1a23ba94d139b1fa2cd4519fdcfe3c9bab]
+CVE: CVE-2020-13114
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ libexif/canon/exif-mnote-data-canon.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/libexif/canon/exif-mnote-data-canon.c b/libexif/canon/exif-mnote-data-canon.c
+index eb53598..72fd7a3 100644
+--- a/libexif/canon/exif-mnote-data-canon.c
++++ b/libexif/canon/exif-mnote-data-canon.c
+@@ -32,6 +32,9 @@
+
+ #define DEBUG
+
++/* Total size limit to prevent abuse by DoS */
++#define FAILSAFE_SIZE_MAX 1000000L
++
+ static void
+ exif_mnote_data_canon_clear (ExifMnoteDataCanon *n)
+ {
+@@ -202,6 +205,7 @@ exif_mnote_data_canon_load (ExifMnoteData *ne,
+ ExifMnoteDataCanon *n = (ExifMnoteDataCanon *) ne;
+ ExifShort c;
+ size_t i, tcount, o, datao;
++ long failsafe_size = 0;
+
+ if (!n || !buf || !buf_size) {
+ exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
+@@ -280,6 +284,23 @@ exif_mnote_data_canon_load (ExifMnoteData *ne,
+ memcpy (n->entries[tcount].data, buf + dataofs, s);
+ }
+
++ /* Track the size of decoded tag data. A malicious file could
++ * be crafted to cause extremely large values here without
++ * tripping any buffer range checks. This is especially bad
++ * with the libexif representation of Canon MakerNotes because
++ * some arrays are turned into individual tags that the
++ * application must loop around. */
++ failsafe_size += mnote_canon_entry_count_values(&n->entries[tcount]);
++
++ if (failsafe_size > FAILSAFE_SIZE_MAX) {
++ /* Abort if the total size of the data in the tags is extraordinarily large. */
++ exif_mem_free (ne->mem, n->entries[tcount].data);
++ exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA,
++ "ExifMnoteCanon", "Failsafe tag size overflow (%lu > %ld)",
++ failsafe_size, FAILSAFE_SIZE_MAX);
++ break;
++ }
++
+ /* Tag was successfully parsed */
+ ++tcount;
+ }
diff --git a/meta/recipes-support/libexif/libexif_0.6.21.bb b/meta/recipes-support/libexif/libexif_0.6.21.bb
index d847beab18..3f6fa32b25 100644
--- a/meta/recipes-support/libexif/libexif_0.6.21.bb
+++ b/meta/recipes-support/libexif/libexif_0.6.21.bb
@@ -7,7 +7,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=243b725d71bb5df4a1e5920b344b86ad"
SRC_URI = "${SOURCEFORGE_MIRROR}/libexif/libexif-${PV}.tar.bz2 \
file://CVE-2017-7544.patch \
file://CVE-2016-6328.patch \
- file://CVE-2018-20030.patch"
+ file://CVE-2018-20030.patch \
+ file://CVE-2020-13114.patch \
+"
SRC_URI[md5sum] = "27339b89850f28c8f1c237f233e05b27"
SRC_URI[sha256sum] = "16cdaeb62eb3e6dfab2435f7d7bccd2f37438d21c5218ec4e58efa9157d4d41a"
diff --git a/meta/recipes-support/libgcrypt/files/determinism.patch b/meta/recipes-support/libgcrypt/files/determinism.patch
new file mode 100644
index 0000000000..ad0b8c7950
--- /dev/null
+++ b/meta/recipes-support/libgcrypt/files/determinism.patch
@@ -0,0 +1,32 @@
+libgcrypt detects our outer git trees and injects that revision into its objects.
+That isn't deterministic so stop it. Also ensure we're not marked as a development
+build as its git detection is faulty.
+
+RP 2020/2/6
+
+Upstream-Status: Pending
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+
+Index: libgcrypt-1.8.5/configure.ac
+===================================================================
+--- libgcrypt-1.8.5.orig/configure.ac
++++ libgcrypt-1.8.5/configure.ac
+@@ -45,7 +45,7 @@ m4_define([mym4_revision_dec],
+ m4_define([mym4_betastring],
+ m4_esyscmd_s([git describe --match 'libgcrypt-[0-9].*[0-9]' --long|\
+ awk -F- '$3!=0{print"-beta"$3}']))
+-m4_define([mym4_isgit],m4_if(mym4_betastring,[],[no],[yes]))
++m4_define([mym4_isgit],[no])
+ m4_define([mym4_full_version],[mym4_version[]mym4_betastring])
+
+ AC_INIT([libgcrypt],[mym4_full_version],[http://bugs.gnupg.org])
+@@ -2575,7 +2575,7 @@ AM_CONDITIONAL([BUILD_DOC], [test "x$bui
+ #
+ # Provide information about the build.
+ #
+-BUILD_REVISION="mym4_revision"
++BUILD_REVISION="None"
+ AC_SUBST(BUILD_REVISION)
+ AC_DEFINE_UNQUOTED(BUILD_REVISION, "$BUILD_REVISION",
+ [GIT commit id revision used to build this package])
diff --git a/meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb b/meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb
index 1bd355133e..92eb2d257a 100644
--- a/meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb
+++ b/meta/recipes-support/libgcrypt/libgcrypt_1.8.4.bb
@@ -26,6 +26,7 @@ SRC_URI = "${GNUPG_MIRROR}/libgcrypt/libgcrypt-${PV}.tar.bz2 \
file://0003-GCM-move-look-up-table-to-.data-section-and-unshare-.patch \
file://0001-ecc-Add-mitigation-against-timing-attack.patch \
file://0001-dsa-ecdsa-Fix-use-of-nonce-use-larger-one.patch \
+ file://determinism.patch \
"
SRC_URI[md5sum] = "fbfdaebbbc6d7e5fbbf6ffdb3e139573"
SRC_URI[sha256sum] = "f638143a0672628fde0cad745e9b14deb85dffb175709cacc1f4fe24b93f2227"
diff --git a/meta/recipes-support/libpcre/libpcre/CVE-2020-14155.patch b/meta/recipes-support/libpcre/libpcre/CVE-2020-14155.patch
new file mode 100644
index 0000000000..183512fd7d
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre/CVE-2020-14155.patch
@@ -0,0 +1,41 @@
+--- pcre-8.43/pcre_compile.c 2020-07-05 22:26:25.310501521 +0530
++++ pcre-8.43/pcre_compile1.c 2020-07-05 22:30:22.254489562 +0530
+
+CVE: CVE-2020-14155
+Upstream-Status: Backport [https://vcs.pcre.org/pcre/code/trunk/pcre_compile.c?view=patch&r1=1761&r2=1760&pathrev=1761]
+Signed-off-by: Rahul Taya <Rahul.Taya@kpit.com>
+
+@@ -6,7 +6,7 @@
+ and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+- Copyright (c) 1997-2018 University of Cambridge
++ Copyright (c) 1997-2020 University of Cambridge
+
+ -----------------------------------------------------------------------------
+ Redistribution and use in source and binary forms, with or without
+@@ -7130,17 +7130,19 @@
+ int n = 0;
+ ptr++;
+ while(IS_DIGIT(*ptr))
++ {
+ n = n * 10 + *ptr++ - CHAR_0;
++ if (n > 255)
++ {
++ *errorcodeptr = ERR38;
++ goto FAILED;
++ }
++ }
+ if (*ptr != CHAR_RIGHT_PARENTHESIS)
+ {
+ *errorcodeptr = ERR39;
+ goto FAILED;
+ }
+- if (n > 255)
+- {
+- *errorcodeptr = ERR38;
+- goto FAILED;
+- }
+ *code++ = n;
+ PUT(code, 0, (int)(ptr - cd->start_pattern + 1)); /* Pattern offset */
+ PUT(code, LINK_SIZE, 0); /* Default length */
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2019-20454.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2019-20454.patch
new file mode 100644
index 0000000000..51f95a7097
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2019-20454.patch
@@ -0,0 +1,19 @@
+Upstream-Status: Backport [https://vcs.pcre.org/pcre2/code/trunk/src/pcre2_jit_compile.c?r1=1092&r2=1091&pathrev=1092]
+CVE: CVE-2019-20454
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+--- pcre2-10.30/src/pcre2_jit_compile.c 2019/05/13 16:26:17 1091
++++ pcre2-10.30/src/pcre2_jit_compile.c 2019/05/13 16:38:18 1092
+@@ -8571,7 +8571,10 @@
+ PCRE2_SPTR bptr;
+ uint32_t c;
+
+-GETCHARINC(c, cc);
++/* Patch by PH */
++/* GETCHARINC(c, cc); */
++
++c = *cc++;
+ #if PCRE2_CODE_UNIT_WIDTH == 32
+ if (c >= 0x110000)
+ return NULL;
+
diff --git a/meta/recipes-support/libpcre/libpcre2_10.33.bb b/meta/recipes-support/libpcre/libpcre2_10.33.bb
index 50b26753b4..1020df99b8 100644
--- a/meta/recipes-support/libpcre/libpcre2_10.33.bb
+++ b/meta/recipes-support/libpcre/libpcre2_10.33.bb
@@ -12,6 +12,7 @@ LIC_FILES_CHKSUM = "file://LICENCE;md5=b1588d3bb4cb0e1f5a597d908f8c5b37"
SRC_URI = "https://ftp.pcre.org/pub/pcre/pcre2-${PV}.tar.bz2 \
file://pcre-cross.patch \
+ file://CVE-2019-20454.patch \
"
SRC_URI[md5sum] = "80b355f2dce909a2e2424f5c79eddb44"
diff --git a/meta/recipes-support/libpcre/libpcre_8.43.bb b/meta/recipes-support/libpcre/libpcre_8.43.bb
index b97af08b25..60ece64504 100644
--- a/meta/recipes-support/libpcre/libpcre_8.43.bb
+++ b/meta/recipes-support/libpcre/libpcre_8.43.bb
@@ -12,6 +12,7 @@ SRC_URI = "https://ftp.pcre.org/pub/pcre/pcre-${PV}.tar.bz2 \
file://out-of-tree.patch \
file://run-ptest \
file://Makefile \
+ file://CVE-2020-14155.patch \
"
SRC_URI[md5sum] = "636222e79e392c3d95dcc545f24f98c4"
diff --git a/meta/recipes-support/nss/nss/0001-Bug-1631576-Force-a-fixed-length-for-DSA-exponentiat.patch b/meta/recipes-support/nss/nss/0001-Bug-1631576-Force-a-fixed-length-for-DSA-exponentiat.patch
new file mode 100644
index 0000000000..517c277ae0
--- /dev/null
+++ b/meta/recipes-support/nss/nss/0001-Bug-1631576-Force-a-fixed-length-for-DSA-exponentiat.patch
@@ -0,0 +1,110 @@
+From 5942c26888ba12ad5e0d92fb62f23d7cde6dc159 Mon Sep 17 00:00:00 2001
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Mon, 13 Jul 2020 06:25:56 +0000
+Subject: [PATCH] Bug 1631576 - Force a fixed length for DSA exponentiation
+ r=pereida,bbrumley
+
+Differential Revision: https://phabricator.services.mozilla.com/D72011
+
+Upstream-Status: Backport [https://hg.mozilla.org/projects/nss/rev/daa823a4a29bcef0fec33a379ec83857429aea2e]
+
+Authored-by: Robert Relyea <rrelyea@redhat.com>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+---
+ nss/lib/freebl/dsa.c | 45 ++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 35 insertions(+), 10 deletions(-)
+
+diff --git a/nss/lib/freebl/dsa.c b/nss/lib/freebl/dsa.c
+index aef3539..389c9de 100644
+--- a/nss/lib/freebl/dsa.c
++++ b/nss/lib/freebl/dsa.c
+@@ -313,13 +313,14 @@ DSA_NewKeyFromSeed(const PQGParams *params,
+
+ static SECStatus
+ dsa_SignDigest(DSAPrivateKey *key, SECItem *signature, const SECItem *digest,
+- const unsigned char *kb)
++ const unsigned char *kbytes)
+ {
+ mp_int p, q, g; /* PQG parameters */
+ mp_int x, k; /* private key & pseudo-random integer */
+ mp_int r, s; /* tuple (r, s) is signature) */
+ mp_int t; /* holding tmp values */
+ mp_int ar; /* holding blinding values */
++ mp_digit fuzz; /* blinding multiplier for q */
+ mp_err err = MP_OKAY;
+ SECStatus rv = SECSuccess;
+ unsigned int dsa_subprime_len, dsa_signature_len, offset;
+@@ -373,6 +374,7 @@ dsa_SignDigest(DSAPrivateKey *key, SECItem *signature, const SECItem *digest,
+ CHECK_MPI_OK(mp_init(&s));
+ CHECK_MPI_OK(mp_init(&t));
+ CHECK_MPI_OK(mp_init(&ar));
++
+ /*
+ ** Convert stored PQG and private key into MPI integers.
+ */
+@@ -380,14 +382,28 @@ dsa_SignDigest(DSAPrivateKey *key, SECItem *signature, const SECItem *digest,
+ SECITEM_TO_MPINT(key->params.subPrime, &q);
+ SECITEM_TO_MPINT(key->params.base, &g);
+ SECITEM_TO_MPINT(key->privateValue, &x);
+- OCTETS_TO_MPINT(kb, &k, dsa_subprime_len);
++ OCTETS_TO_MPINT(kbytes, &k, dsa_subprime_len);
++
++ /* k blinding: create a single value that has the high bit set in
++ * the mp_digit */
++ if (RNG_GenerateGlobalRandomBytes(&fuzz, sizeof(mp_digit)) != SECSuccess) {
++ PORT_SetError(SEC_ERROR_NEED_RANDOM);
++ rv = SECFailure;
++ goto cleanup;
++ }
++ fuzz |= 1ULL << ((sizeof(mp_digit) * PR_BITS_PER_BYTE - 1));
+ /*
+ ** FIPS 186-1, Section 5, Step 1
+ **
+ ** r = (g**k mod p) mod q
+ */
+- CHECK_MPI_OK(mp_exptmod(&g, &k, &p, &r)); /* r = g**k mod p */
+- CHECK_MPI_OK(mp_mod(&r, &q, &r)); /* r = r mod q */
++ CHECK_MPI_OK(mp_mul_d(&q, fuzz, &t)); /* t = q*fuzz */
++ CHECK_MPI_OK(mp_add(&k, &t, &t)); /* t = k+q*fuzz */
++ /* length of t is now fixed, bits in k have been blinded */
++ CHECK_MPI_OK(mp_exptmod(&g, &t, &p, &r)); /* r = g**t mod p */
++ /* r is now g**(k+q*fuzz) == g**k mod p */
++ CHECK_MPI_OK(mp_mod(&r, &q, &r)); /* r = r mod q */
++
+ /*
+ ** FIPS 186-1, Section 5, Step 2
+ **
+@@ -411,15 +427,24 @@ dsa_SignDigest(DSAPrivateKey *key, SECItem *signature, const SECItem *digest,
+ /* Using mp_invmod on k directly would leak bits from k. */
+ CHECK_MPI_OK(mp_mul(&k, &ar, &k)); /* k = k * ar */
+ CHECK_MPI_OK(mp_mulmod(&k, &t, &q, &k)); /* k = k * t mod q */
+- CHECK_MPI_OK(mp_invmod(&k, &q, &k)); /* k = k**-1 mod q */
++ /* k is now k*t*ar */
++ CHECK_MPI_OK(mp_invmod(&k, &q, &k)); /* k = k**-1 mod q */
++ /* k is now (k*t*ar)**-1 */
+ CHECK_MPI_OK(mp_mulmod(&k, &t, &q, &k)); /* k = k * t mod q */
+- SECITEM_TO_MPINT(localDigest, &s); /* s = HASH(M) */
++ /* k is now (k*ar)**-1 */
++ SECITEM_TO_MPINT(localDigest, &s); /* s = HASH(M) */
+ /* To avoid leaking secret bits here the addition is blinded. */
+- CHECK_MPI_OK(mp_mul(&x, &ar, &x)); /* x = x * ar */
+- CHECK_MPI_OK(mp_mulmod(&x, &r, &q, &x)); /* x = x * r mod q */
++ CHECK_MPI_OK(mp_mul(&x, &ar, &x)); /* x = x * ar */
++ /* x is now x*ar */
++ CHECK_MPI_OK(mp_mulmod(&x, &r, &q, &x)); /* x = x * r mod q */
++ /* x is now x*r*ar */
+ CHECK_MPI_OK(mp_mulmod(&s, &ar, &q, &t)); /* t = s * ar mod q */
+- CHECK_MPI_OK(mp_add(&t, &x, &s)); /* s = t + x */
+- CHECK_MPI_OK(mp_mulmod(&s, &k, &q, &s)); /* s = s * k mod q */
++ /* t is now hash(M)*ar */
++ CHECK_MPI_OK(mp_add(&t, &x, &s)); /* s = t + x */
++ /* s is now (HASH(M)+x*r)*ar */
++ CHECK_MPI_OK(mp_mulmod(&s, &k, &q, &s)); /* s = s * k mod q */
++ /* s is now (HASH(M)+x*r)*ar*(k*ar)**-1 = (k**-1)*(HASH(M)+x*r) */
++
+ /*
+ ** verify r != 0 and s != 0
+ ** mentioned as optional in FIPS 186-1.
+--
+2.18.1
+
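The blinding above relies on g having multiplicative order q modulo p: adding any multiple of q to the exponent leaves g**k mod p unchanged, while the high bit forced into fuzz gives the blinded exponent a fixed bit length. A toy sketch of that identity, with small non-cryptographic parameters chosen purely for illustration:

    # Toy DSA-style parameters: p prime, q divides p-1, g has order q modulo p.
    p, q = 23, 11        # 23 - 1 = 2 * 11
    g = 4                # 4 = 2**2, so 4**11 = 2**22 = 1 (mod 23) and 4 != 1
    k = 7                # stand-in for the per-signature secret nonce
    fuzz = 5             # stand-in for the random mp_digit with its high bit set

    r_plain   = pow(g, k, p) % q             # old path: r = (g**k mod p) mod q
    r_blinded = pow(g, k + q * fuzz, p) % q  # new path: fixed-length exponent

    assert r_plain == r_blinded
    print(r_plain, r_blinded)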
diff --git a/meta/recipes-support/nss/nss_3.45.bb b/meta/recipes-support/nss/nss_3.45.bb
index c8005a5b3a..9fe27af5db 100644
--- a/meta/recipes-support/nss/nss_3.45.bb
+++ b/meta/recipes-support/nss/nss_3.45.bb
@@ -32,6 +32,7 @@ SRC_URI = "http://ftp.mozilla.org/pub/mozilla.org/security/nss/releases/${VERSIO
file://blank-cert9.db \
file://blank-key4.db \
file://system-pkcs11.txt \
+ file://0001-Bug-1631576-Force-a-fixed-length-for-DSA-exponentiat.patch \
"
SRC_URI[md5sum] = "f1752d7223ee9d910d551e57264bafa8"
diff --git a/meta/recipes-support/sqlite/files/CVE-2020-11655.patch b/meta/recipes-support/sqlite/files/CVE-2020-11655.patch
new file mode 100644
index 0000000000..c2360cb867
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2020-11655.patch
@@ -0,0 +1,32 @@
+From a4601326d61bf1a11151ac6b78b50804bfd03b4d Mon Sep 17 00:00:00 2001
+From: Sakib Sajal <sakib.sajal@windriver.com>
+Date: Thu, 30 Apr 2020 10:46:16 -0700
+Subject: [PATCH 2/2] In the event of a semantic error in an aggregate query,
+ early-out the resetAccumulator() function to prevent problems due to
+ incomplete or incorrect initialization of the AggInfo object. Fix for ticket
+ [af4556bb5c285c08].
+
+FossilOrigin-Name: 4a302b42c7bf5e11ddb5522ca999f74aba397d3a7eb91b1844bb02852f772441
+Upstream-Status: Backport [c415d91007e1680e4eb17def583b202c3c83c718]
+
+CVE: CVE-2020-11655
+Signed-off-by: Sakib Sajal <sakib.sajal@windriver.com>
+---
+ sqlite3.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 1df6633..726adf7 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -133242,6 +133242,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
+ struct AggInfo_func *pFunc;
+ int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
+ if( nReg==0 ) return;
++ if( pParse->nErr ) return;
+ #ifdef SQLITE_DEBUG
+ /* Verify that all AggInfo registers are within the range specified by
+ ** AggInfo.mnReg..AggInfo.mxReg */
+--
+2.17.1
+
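The early-out added above makes resetAccumulator() return as soon as the parser has already recorded an error, so later code never touches a half-initialised AggInfo. The class of statement involved is an aggregate query that fails semantic analysis; a hedged illustration with Python's bundled sqlite3 (the exact error text depends on the linked SQLite version, and this query is not claimed to reproduce the original problem):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE t(a)")
    try:
        # Aggregate misuse: aggregate functions are not allowed in WHERE,
        # so this aggregate query fails during semantic analysis.
        con.execute("SELECT sum(a) FROM t WHERE sum(a) > 0")
    except sqlite3.Error as exc:
        print("rejected cleanly:", exc)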
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19244.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19244.patch
new file mode 100644
index 0000000000..3f70979acc
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19244.patch
@@ -0,0 +1,33 @@
+CVE: CVE-2019-19244
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 0f690d4ae5ffe656762fdbb7f36cc4c2dcbb2d9d Mon Sep 17 00:00:00 2001
+From: dan <dan@noemail.net>
+Date: Fri, 22 Nov 2019 10:14:01 +0000
+Subject: [PATCH] Fix a crash that could occur if a sub-select that uses both
+ DISTINCT and window functions also used an ORDER BY that is the same as its
+ select list.
+
+Amalgamation version of the patch:
+FossilOrigin-Name: bcdd66c1691955c697f3d756c2b035acfe98f6aad72e90b0021bab6e9023b3ba
+---
+ sqlite3.c | 5 +++--
+ sqlite3.h | 2 +-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 8fd740b..db1c649 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -131679,6 +131679,7 @@ SQLITE_PRIVATE int sqlite3Select(
+ */
+ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
+ && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0
++ && p->pWin==0
+ ){
+ p->selFlags &= ~SF_Distinct;
+ pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0);
+--
+2.24.1
+
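The added p->pWin==0 test keeps the DISTINCT-to-GROUP-BY rewrite from firing when window functions are present. The query shape described in the commit message looks roughly like the sketch below (illustrative only; it needs an SQLite build with window-function support, i.e. 3.25 or newer, and is not claimed to reproduce the crash):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.executescript("CREATE TABLE t(a, b); INSERT INTO t VALUES (1, 2), (1, 2), (3, 4);")
    # Sub-select combining DISTINCT, a window function, and an ORDER BY
    # that repeats its select list.
    rows = con.execute(
        "SELECT * FROM ("
        "  SELECT DISTINCT a, sum(b) OVER (PARTITION BY a) AS s FROM t"
        "  ORDER BY a, sum(b) OVER (PARTITION BY a))").fetchall()
    print(rows)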
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19923.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19923.patch
new file mode 100644
index 0000000000..b1b866b250
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19923.patch
@@ -0,0 +1,50 @@
+CVE: CVE-2019-19923
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From b64463719dc53bde98b0ce3930b10a32560c3a02 Mon Sep 17 00:00:00 2001
+From: "D. Richard Hipp" <drh@hwaci.com>
+Date: Wed, 18 Dec 2019 20:51:58 +0000
+Subject: [PATCH] Continue to back away from the LEFT JOIN optimization of
+ check-in [41c27bc0ff1d3135] by disallowing query flattening if the outer
+ query is DISTINCT. Without this fix, if an index scan is run on the table
+ within the view on the right-hand side of the LEFT JOIN, stale result
+ registers might be accessed yielding incorrect results, and/or an
+ OP_IfNullRow opcode might be invoked on the un-opened table, resulting in a
+ NULL-pointer dereference. This problem was found by the Yongheng and Rui
+ fuzzer.
+
+FossilOrigin-Name: 862974312edf00e9d1068115d1a39b7235b7db68b6d86b81d38a12f025a4748e
+---
+ sqlite3.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index d29da07..5bc06c8 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -129216,6 +129216,7 @@ static void substSelect(
+ ** (3b) the FROM clause of the subquery may not contain a virtual
+ ** table and
+ ** (3c) the outer query may not be an aggregate.
++** (3d) the outer query may not be DISTINCT.
+ **
+ ** (4) The subquery can not be DISTINCT.
+ **
+@@ -129412,8 +129413,11 @@ static int flattenSubquery(
+ */
+ if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){
+ isLeftJoin = 1;
+- if( pSubSrc->nSrc>1 || isAgg || IsVirtual(pSubSrc->a[0].pTab) ){
+- /* (3a) (3c) (3b) */
++ if( pSubSrc->nSrc>1 /* (3a) */
++ || isAgg /* (3b) */
++ || IsVirtual(pSubSrc->a[0].pTab) /* (3c) */
++ || (p->selFlags & SF_Distinct)!=0 /* (3d) */
++ ){
+ return 0;
+ }
+ }
+--
+2.24.1
+
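Restriction (3d) stops query flattening when the outer query is DISTINCT and the right-hand side of the LEFT JOIN is a view or subquery. A rough sketch of that query shape using Python's bundled sqlite3 (shape only; not claimed to trigger the reported misbehaviour):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.executescript("""
    CREATE TABLE t1(x);
    CREATE TABLE t2(y);
    CREATE VIEW v2 AS SELECT y FROM t2;
    INSERT INTO t1 VALUES (1), (1), (2);
    """)
    # DISTINCT outer query with a view on the right of a LEFT JOIN:
    # the combination the new restriction (3d) refuses to flatten.
    print(con.execute("SELECT DISTINCT x FROM t1 LEFT JOIN v2 ON x = y").fetchall())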
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19924.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19924.patch
new file mode 100644
index 0000000000..80d5edbb0c
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19924.patch
@@ -0,0 +1,65 @@
+CVE: CVE-2019-19924
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 854fe21e8a987f84da81f6bb9e90abc5355c6621 Mon Sep 17 00:00:00 2001
+From: "D. Richard Hipp" <drh@hwaci.com>
+Date: Thu, 19 Dec 2019 20:37:32 +0000
+Subject: [PATCH] When an error occurs while rewriting the parser tree for
+ window functions in the sqlite3WindowRewrite() routine, make sure that
+ pParse->nErr is set, and make sure that this shuts down any subsequent code
+ generation that might depend on the transformations that were implemented.
+ This fixes a problem discovered by the Yongheng and Rui fuzzer.
+
+Amalgamation format of backported patch
+FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
+---
+ sqlite3.c | 16 +++++++++++-----
+ sqlite3.h | 2 +-
+ 2 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 408ec4c..857c28e 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -77798,7 +77798,8 @@ SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){
+ */
+ static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){
+ assert( p->nOp>0 || p->aOp==0 );
+- assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed );
++ assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed
++ || p->pParse->nErr>0 );
+ if( p->nOp ){
+ assert( p->aOp );
+ sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment);
+@@ -97872,6 +97873,7 @@ static int codeCompare(
+ int addr;
+ CollSeq *p4;
+
++ if( pParse->nErr ) return 0;
+ p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight);
+ p5 = binaryCompareP5(pLeft, pRight, jumpIfNull);
+ addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1,
+@@ -147627,7 +147629,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
+
+ pTab = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTab==0 ){
+- return SQLITE_NOMEM;
++ return sqlite3ErrorToParser(db, SQLITE_NOMEM);
+ }
+
+ p->pSrc = 0;
+@@ -147731,6 +147733,10 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
+ sqlite3DbFree(db, pTab);
+ }
+
++ if( rc && pParse->nErr==0 ){
++ assert( pParse->db->mallocFailed );
++ return sqlite3ErrorToParser(pParse->db, SQLITE_NOMEM);
++ }
+ return rc;
+ }
+
+--
+2.24.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19925.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19925.patch
new file mode 100644
index 0000000000..ffc2c6afff
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19925.patch
@@ -0,0 +1,33 @@
+CVE: CVE-2019-19925
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From e92580434d2cdca228649d32f76167492de4f512 Mon Sep 17 00:00:00 2001
+From: "D. Richard Hipp" <drh@hwaci.com>
+Date: Thu, 19 Dec 2019 15:15:40 +0000
+Subject: [PATCH] Fix the zipfile extension so that INSERT works even if the
+ pathname of the file being inserted is a NULL. Bug discovered by the
+ Yongheng and Rui fuzzer.
+
+FossilOrigin-Name: a80f84b511231204658304226de3e075a55afc2e3f39ac063716f7a57f585c06
+---
+ shell.c | 1 +
+ sqlite3.c | 4 ++--
+ sqlite3.h | 2 +-
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/shell.c b/shell.c
+index 053180c..404a8d4 100644
+--- a/shell.c
++++ b/shell.c
+@@ -5827,6 +5827,7 @@ static int zipfileUpdate(
+
+ if( rc==SQLITE_OK ){
+ zPath = (const char*)sqlite3_value_text(apVal[2]);
++ if( zPath==0 ) zPath = "";
+ nPath = (int)strlen(zPath);
+ mTime = zipfileGetTime(apVal[4]);
+ }
+--
+2.24.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19926.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19926.patch
new file mode 100644
index 0000000000..92bc7908bc
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19926.patch
@@ -0,0 +1,31 @@
+CVE: CVE-2019-19926
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 4165b1e1e0001165ace9051a70f938099505eadc Mon Sep 17 00:00:00 2001
+From: "D. Richard Hipp" <drh@hwaci.com>
+Date: Thu, 19 Dec 2019 22:08:19 +0000
+Subject: [PATCH] Continuation of [e2bddcd4c55ba3cb]: Add another spot where it
+ is necessary to abort early due to prior errors in sqlite3WindowRewrite().
+
+FossilOrigin-Name: cba2a2a44cdf138a629109bb0ad088ed4ef67fc66bed3e0373554681a39615d2
+---
+ sqlite3.c | 7 ++++---
+ sqlite3.h | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 857c28e..19a474d 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -128427,6 +128427,7 @@ static int multiSelect(
+ }
+ #endif
+ }
++ if( pParse->nErr ) goto multi_select_end;
+
+ /* Compute collating sequences used by
+ ** temporary tables needed to implement the compound select.
+--
+2.24.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-19959.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19959.patch
new file mode 100644
index 0000000000..cba8ec9d30
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-19959.patch
@@ -0,0 +1,46 @@
+CVE: CVE-2019-19959
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From f83f7e8141ee7cbbf7f2dc8985279a7372b259b6 Mon Sep 17 00:00:00 2001
+From: "D. Richard Hipp" <drh@hwaci.com>
+Date: Mon, 23 Dec 2019 21:04:33 +0000
+Subject: [PATCH] Fix the zipfile() function in the zipfile extension so that
+ it is able to deal with goofy filenames that contain embedded zeros.
+
+FossilOrigin-Name: cc0fb00a128fd0773db5ff7891f7aa577a3671d570166d2cbb30df922344adcf
+---
+ shell.c | 4 ++--
+ sqlite3.c | 4 ++--
+ sqlite3.h | 2 +-
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/shell.c b/shell.c
+index 404a8d4..48065e9 100644
+--- a/shell.c
++++ b/shell.c
+@@ -5841,7 +5841,7 @@ static int zipfileUpdate(
+ zFree = sqlite3_mprintf("%s/", zPath);
+ if( zFree==0 ){ rc = SQLITE_NOMEM; }
+ zPath = (const char*)zFree;
+- nPath++;
++ nPath = (int)strlen(zPath);
+ }
+ }
+
+@@ -6242,11 +6242,11 @@ void zipfileStep(sqlite3_context *pCtx, int nVal, sqlite3_value **apVal){
+ }else{
+ if( zName[nName-1]!='/' ){
+ zName = zFree = sqlite3_mprintf("%s/", zName);
+- nName++;
+ if( zName==0 ){
+ rc = SQLITE_NOMEM;
+ goto zipfile_step_out;
+ }
++ nName = (int)strlen(zName);
+ }else{
+ while( nName>1 && zName[nName-2]=='/' ) nName--;
+ }
+--
+2.24.1
+
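Both hunks replace a blind length increment with a fresh strlen() after the trailing '/' is appended: %s formatting works on NUL-terminated strings, so for names containing embedded zeros the rebuilt path can be much shorter than the length recorded for the entry. The mismatch is easy to see in isolation (pure illustration, independent of SQLite):

    # A zip member name with an embedded zero byte, as recorded in the archive.
    name = b"dir\x00junk"
    stored_len = len(name)                    # 8: the length the old code incremented

    # What a "%s/"-style format produces: everything up to the first NUL, plus '/'.
    c_string = name.split(b"\x00", 1)[0] + b"/"

    print(stored_len + 1, len(c_string))      # 9 vs 4: the old increment overstates the length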
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2019-20218.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2019-20218.patch
new file mode 100644
index 0000000000..fb6cd6df2d
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2019-20218.patch
@@ -0,0 +1,31 @@
+CVE: CVE-2019-20218
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 6bbd76d34f29f61483791231f2ce579dcadab8a5 Mon Sep 17 00:00:00 2001
+From: Dan Kennedy <danielk1977@gmail.com>
+Date: Fri, 27 Dec 2019 20:54:42 +0000
+Subject: [PATCH] Do not attempt to unwind the WITH stack in the Parse object
+ following an error. This fixes a separate case to [de6e6d68].
+
+FossilOrigin-Name: d29edef93451cc67a5d69c1cce1b1832d9ca8fff1f600afdd51338b74d077b92
+---
+ sqlite3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 5bc06c8..408ec4c 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -130570,7 +130570,7 @@ static int selectExpander(Walker *pWalker, Select *p){
+
+ /* Process NATURAL keywords, and ON and USING clauses of joins.
+ */
+- if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){
++ if( pParse->nErr || db->mallocFailed || sqliteProcessJoin(pParse, p) ){
+ return WRC_Abort;
+ }
+
+--
+2.24.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3/CVE-2020-13632.patch b/meta/recipes-support/sqlite/sqlite3/CVE-2020-13632.patch
new file mode 100644
index 0000000000..7af5e91c4c
--- /dev/null
+++ b/meta/recipes-support/sqlite/sqlite3/CVE-2020-13632.patch
@@ -0,0 +1,32 @@
+From 219b8e7e7587df8669d96ce867cdd61ca1c05730 Mon Sep 17 00:00:00 2001
+From: drh <drh@noemail.net>
+Date: Thu, 14 May 2020 23:59:24 +0000
+Subject: [PATCH] Fix a null pointer dereference that can occur on a strange
+ matchinfo() query.
+
+FossilOrigin-Name: a4dd148928ea65bd4e1654dfacc3d8057d1f85b8c9939416991d50722e5a720e
+
+Upstream-Status: Backport
+CVE: CVE-2020-13632
+[https://github.com/sqlite/sqlite/commit/219b8e7e7587df8669d96ce867cdd61ca1c05730]
+Signed-off-by: Li Wang <li.wang@windriver.com>
+---
+ sqlite3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index fd28360..ee455e5 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -177622,7 +177622,7 @@ static int fts3ExprLHits(
+ iStart = pExpr->iPhrase * ((p->nCol + 31) / 32);
+ }
+
+- while( 1 ){
++ if( pIter ) while( 1 ){
+ int nHit = fts3ColumnlistCount(&pIter);
+ if( (pPhrase->iColumn>=pTab->nColumn || pPhrase->iColumn==iCol) ){
+ if( p->flag==FTS3_MATCHINFO_LHITS ){
+--
+2.17.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3_3.29.0.bb b/meta/recipes-support/sqlite/sqlite3_3.29.0.bb
index 34066fbe89..425612bf12 100644
--- a/meta/recipes-support/sqlite/sqlite3_3.29.0.bb
+++ b/meta/recipes-support/sqlite/sqlite3_3.29.0.bb
@@ -4,6 +4,16 @@ LICENSE = "PD"
LIC_FILES_CHKSUM = "file://sqlite3.h;endline=11;md5=786d3dc581eff03f4fd9e4a77ed00c66"
SRC_URI = "http://www.sqlite.org/2019/sqlite-autoconf-${SQLITE_PV}.tar.gz \
- file://0001-Fix-CVE-2019-16168.patch"
+ file://0001-Fix-CVE-2019-16168.patch \
+ file://CVE-2019-19244.patch \
+ file://CVE-2019-19923.patch \
+ file://CVE-2019-19924.patch \
+ file://CVE-2019-19925.patch \
+ file://CVE-2019-19926.patch \
+ file://CVE-2019-19959.patch \
+ file://CVE-2019-20218.patch \
+ file://CVE-2020-11655.patch \
+ file://CVE-2020-13632.patch \
+"
SRC_URI[md5sum] = "8f3dfe83387e62ecb91c7c5c09c688dc"
SRC_URI[sha256sum] = "8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b"
diff --git a/meta/recipes-support/vim/vim_8.1.1518.bb b/meta/recipes-support/vim/vim_8.1.1518.bb
index 60946a181f..709b6ddb55 100644
--- a/meta/recipes-support/vim/vim_8.1.1518.bb
+++ b/meta/recipes-support/vim/vim_8.1.1518.bb
@@ -8,3 +8,8 @@ BBCLASSEXTEND = "native"
ALTERNATIVE_${PN}_append = " xxd"
ALTERNATIVE_TARGET[xxd] = "${bindir}/xxd"
ALTERNATIVE_LINK_NAME[xxd] = "${bindir}/xxd"
+
+# We override the default in security_flags.inc because vim (not vim-tiny!) will abort
+# in many places when built with _FORTIFY_SOURCE=2. The security flags become part of CC.
+#
+lcl_maybe_fortify = "${@oe.utils.conditional('DEBUG_BUILD','1','','-D_FORTIFY_SOURCE=1',d)}"
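oe.utils.conditional() simply picks one of two strings depending on a variable's value, so the line above yields an empty string when DEBUG_BUILD is "1" and -D_FORTIFY_SOURCE=1 otherwise. A simplified stand-alone sketch of that behaviour (not the real OE-Core helper, which also takes the variable name and the datastore d):

    def conditional(value, checkvalue, truevalue, falsevalue):
        """Simplified stand-in for oe.utils.conditional(): compare a value
        against checkvalue and return one of the two candidate strings."""
        return truevalue if value == checkvalue else falsevalue

    # DEBUG_BUILD != "1": keep fortify at level 1; DEBUG_BUILD == "1": drop the flag.
    print(repr(conditional("0", "1", "", "-D_FORTIFY_SOURCE=1")))  # '-D_FORTIFY_SOURCE=1'
    print(repr(conditional("1", "1", "", "-D_FORTIFY_SOURCE=1")))  # ''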
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index 60c9a046f9..b43c725cf8 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -940,8 +940,10 @@ def modify(args, config, basepath, workspace):
'}\n')
if rd.getVarFlag('do_menuconfig','task'):
f.write('\ndo_configure_append() {\n'
- ' cp ${B}/.config ${S}/.config.baseline\n'
- ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ ' fi\n'
'}\n')
if initial_rev:
f.write('\n# initial_rev: %s\n' % initial_rev)
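The generated do_configure_append now copies and links the .config files only when DEVTOOL_DISABLE_MENUCONFIG is unset or empty: an empty expansion collapses the test to "[ ! ]", which is true, while any non-empty value makes it false. A quick sketch of the shell semantics the guard relies on (it just runs the same test through /bin/sh):

    import subprocess

    # Empty value: the guard reduces to "[ ! ]" (true, so the files are copied).
    # Any non-empty value, e.g. "1": "[ ! 1 ]" is false, so the copy is skipped.
    for val in ("", "1"):
        subprocess.run(
            ["sh", "-c",
             'if [ ! {} ]; then echo "copy .config"; else echo "skip"; fi'.format(val)],
            check=True)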
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 7cb85a6aa9..3850a93f22 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -127,10 +127,7 @@ def decode_log(logdata):
data = logdata.get("compressed")
data = base64.b64decode(data.encode("utf-8"))
data = zlib.decompress(data)
- try:
- return data.decode("utf-8")
- except UnicodeDecodeError:
- return data
+ return data.decode("utf-8", errors='ignore')
return None
def ptestresult_get_log(results, section):
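With errors='ignore', logs that are mostly UTF-8 but contain stray bytes now decode to a string with the offending bytes dropped, instead of falling back to returning raw bytes. For example:

    data = b"ptest: ok \xff\xfe truncated output"   # mostly UTF-8, with stray bytes

    try:
        data.decode("utf-8")                         # the previous strict decode raises
    except UnicodeDecodeError as exc:
        print("strict decode failed:", exc)

    print(data.decode("utf-8", errors="ignore"))     # 'ptest: ok  truncated output'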
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index 18776fa8a0..4ccca482e7 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -290,7 +290,7 @@ class Disk:
def _get_part_image(self, pnum):
if pnum not in self.partitions:
- raise WicError("Partition %s is not in the image")
+ raise WicError("Partition %s is not in the image" % pnum)
part = self.partitions[pnum]
# check if fstype is supported
for fstype in self.fstypes:
@@ -313,6 +313,9 @@ class Disk:
seek=self.partitions[pnum].start)
def dir(self, pnum, path):
+ if pnum not in self.partitions:
+ raise WicError("Partition %s is not in the image" % pnum)
+
if self.partitions[pnum].fstype.startswith('ext'):
return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
self._get_part_image(pnum),
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index a3919fbcad..c53147c2f1 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -34,9 +34,11 @@ def get_block_size(file_obj):
# the FIGETBSZ ioctl (number 2).
try:
binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ bsize = struct.unpack('I', binary_data)[0]
except OSError:
- raise IOError("Unable to determine block size")
- bsize = struct.unpack('I', binary_data)[0]
+ bsize = None
+
+ # If the ioctl raised OSError or returned a zero block size, fall back to os.fstat
if not bsize:
import os
stat = os.fstat(file_obj.fileno())
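Taken together, the lookup now tries the FIGETBSZ ioctl first and falls back to os.fstat() when the ioctl raises OSError or reports a zero block size. A self-contained sketch of the same pattern (Linux-only, like the original; the temporary file is only there to make it runnable):

    import fcntl
    import os
    import struct
    import tempfile

    def get_block_size(file_obj):
        """Best-effort block size: FIGETBSZ ioctl (request number 2),
        falling back to os.fstat() when it fails or reports zero."""
        try:
            binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
            bsize = struct.unpack('I', binary_data)[0]
        except OSError:
            bsize = None
        if not bsize:
            bsize = os.fstat(file_obj.fileno()).st_blksize
        return bsize

    with tempfile.TemporaryFile() as f:
        print(get_block_size(f))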
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index 3a40fc0ea2..03d84bd1d7 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -523,7 +523,8 @@ DESCRIPTION
Source plugins can also be implemented and added by external
layers - any plugins found in a scripts/lib/wic/plugins/source/
- directory in an external layer will also be made available.
+ or lib/wic/plugins/source/ directory in an external layer will
+ also be made available.
When the wic implementation needs to invoke a partition-specific
implementation, it looks for the plugin that has the same name as
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index f74d6430fd..d9b4e57747 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -18,7 +18,7 @@ from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
-SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
@@ -38,10 +38,11 @@ class PluginMgr:
cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
layers = get_bitbake_var("BBLAYERS") or ''
for layer_path in layers.split():
- path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
- path = os.path.abspath(os.path.expanduser(path))
- if path not in cls._plugin_dirs and os.path.isdir(path):
- cls._plugin_dirs.insert(0, path)
+ for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
+ path = os.path.join(layer_path, script_plugin_dir)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
if ptype not in PLUGINS:
# load all ptype plugins
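With SCRIPTS_PLUGIN_DIR turned into a list, each layer in BBLAYERS now contributes up to two candidate plugin directories. A small sketch of the paths that get probed for one layer (the layer path is invented for illustration):

    import os

    SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
    layer_path = "/srv/build/layers/meta-example"   # hypothetical BBLAYERS entry

    for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
        path = os.path.join(layer_path, script_plugin_dir)
        path = os.path.abspath(os.path.expanduser(path))
        print(path)   # only existing, not-yet-listed directories join the search list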
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 2441cc33ad..5148df288a 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -403,7 +403,7 @@ class PartitionedImage():
# Reserve a sector for EBR for every logical partition
# before alignment is performed.
if part.type == 'logical':
- self.offset += 1
+ self.offset += 2
align_sectors = 0
if part.align:
@@ -446,7 +446,7 @@ class PartitionedImage():
self.extendedpart = part.num
else:
self.extended_size_sec += align_sectors
- self.extended_size_sec += part.size_sec + 1
+ self.extended_size_sec += part.size_sec + 2
else:
self.primary_part_num += 1
part.num = self.primary_part_num
@@ -512,7 +512,7 @@ class PartitionedImage():
# add a sector at the back, so that there is enough
# room for all logical partitions.
self._create_partition(self.path, "extended",
- None, part.start - 1,
+ None, part.start - 2,
self.extended_size_sec)
if part.fstype == "swap":
diff --git a/scripts/oe-build-perf-report b/scripts/oe-build-perf-report
index 21bde7e156..e781f4f03f 100755
--- a/scripts/oe-build-perf-report
+++ b/scripts/oe-build-perf-report
@@ -372,7 +372,7 @@ def print_html_report(data, id_comp, buildstats):
chart_opts=chart_opts))
-def get_buildstats(repo, notes_ref, revs, outdir=None):
+def get_buildstats(repo, notes_ref, notes_ref2, revs, outdir=None):
"""Get the buildstats from git notes"""
full_ref = 'refs/notes/' + notes_ref
if not repo.rev_parse(full_ref):
@@ -391,8 +391,13 @@ def get_buildstats(repo, notes_ref, revs, outdir=None):
for tag in rev.tags:
log.debug(' %s', tag)
try:
- bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
- 'show', tag + '^0']))
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref, 'show', tag + '^0']))
+ except GitError:
+ if notes_ref2:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref2, 'show', tag + '^0']))
+ else:
+ raise
except GitError:
log.warning("Buildstats not found for %s", tag)
bs_all = {}
@@ -589,9 +594,12 @@ def main(argv=None):
buildstats = None
if args.dump_buildstats or args.html:
outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
- notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
- args.machine)
- buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ notes_ref2 = None
+ if args.branch2:
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch2, args.machine)
+ notes_ref2 = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ buildstats = get_buildstats(repo, notes_ref, notes_ref2, [rev_l, rev_r], outdir)
# Print report
if not args.html: